Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/caam_dma.c  108
-rw-r--r--  drivers/dma/fsl-qdma.c  226
2 files changed, 212 insertions(+), 122 deletions(-)
diff --git a/drivers/dma/caam_dma.c b/drivers/dma/caam_dma.c
index cc9f9cf..e430b32 100644
--- a/drivers/dma/caam_dma.c
+++ b/drivers/dma/caam_dma.c
@@ -43,9 +43,8 @@ struct caam_dma_edesc {
struct dma_async_tx_descriptor async_tx;
struct list_head node;
struct caam_dma_ctx *ctx;
- dma_addr_t sec4_sg_dma;
- dma_addr_t sec4_sg_dma_dst;
- size_t sec4_sg_bytes;
+ dma_addr_t src_dma;
+ dma_addr_t dst_dma;
unsigned int src_len;
unsigned int dst_len;
struct sec4_sg_entry *sec4_sg;
@@ -74,7 +73,7 @@ static struct dma_device *dma_dev;
static struct caam_dma_sh_desc *dma_sh_desc;
static LIST_HEAD(dma_ctx_list);
-static dma_cookie_t caam_jr_tx_submit(struct dma_async_tx_descriptor *tx)
+static dma_cookie_t caam_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct caam_dma_edesc *edesc = NULL;
struct caam_dma_ctx *ctx = NULL;
@@ -104,12 +103,11 @@ static unsigned int caam_dma_sg_dma_len(struct scatterlist *sg,
return len;
}
-static struct caam_dma_edesc *caam_dma_edesc_alloc(struct dma_chan *chan,
- unsigned long flags,
- struct scatterlist *dst_sg,
- unsigned int dst_nents,
- struct scatterlist *src_sg,
- unsigned int src_nents)
+static struct caam_dma_edesc *
+caam_dma_sg_edesc_alloc(struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags)
{
struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
chan);
@@ -139,7 +137,7 @@ static struct caam_dma_edesc *caam_dma_edesc_alloc(struct dma_chan *chan,
}
dma_async_tx_descriptor_init(&edesc->async_tx, chan);
- edesc->async_tx.tx_submit = caam_jr_tx_submit;
+ edesc->async_tx.tx_submit = caam_dma_tx_submit;
edesc->async_tx.flags = flags;
edesc->async_tx.cookie = -EBUSY;
@@ -161,9 +159,8 @@ static struct caam_dma_edesc *caam_dma_edesc_alloc(struct dma_chan *chan,
return ERR_PTR(-ENOMEM);
}
- edesc->sec4_sg_dma = sec4_sg_dma_src;
- edesc->sec4_sg_dma_dst = sec4_sg_dma_src +
- src_nents * sizeof(*sec4_sg);
+ edesc->src_dma = sec4_sg_dma_src;
+ edesc->dst_dma = sec4_sg_dma_src + src_nents * sizeof(*sec4_sg);
edesc->ctx = ctx;
return edesc;
@@ -215,20 +212,20 @@ static void caam_dma_done(struct device *dev, u32 *hwdesc, u32 err,
callback(callback_param);
}
-void init_dma_job(dma_addr_t sh_desc_dma, struct caam_dma_edesc *edesc)
+static void caam_dma_sg_init_job_desc(struct caam_dma_edesc *edesc)
{
u32 *jd = edesc->jd;
u32 *sh_desc = dma_sh_desc->desc;
+ dma_addr_t desc_dma = dma_sh_desc->desc_dma;
/* init the job descriptor */
- init_job_desc_shared(jd, sh_desc_dma, desc_len(sh_desc), HDR_REVERSE);
+ init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE);
/* set SEQIN PTR */
- append_seq_in_ptr(jd, edesc->sec4_sg_dma, edesc->src_len, LDST_SGF);
+ append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, LDST_SGF);
/* set SEQOUT PTR */
- append_seq_out_ptr(jd, edesc->sec4_sg_dma_dst, edesc->dst_len,
- LDST_SGF);
+ append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, LDST_SGF);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "caam dma desc@" __stringify(__LINE__) ": ",
@@ -238,26 +235,75 @@ void init_dma_job(dma_addr_t sh_desc_dma, struct caam_dma_edesc *edesc)
/* This function can be called from an interrupt context */
static struct dma_async_tx_descriptor *
-caam_jr_prep_dma_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
- unsigned int dst_nents, struct scatterlist *src_sg,
- unsigned int src_nents, unsigned long flags)
+caam_dma_prep_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
+ unsigned int dst_nents, struct scatterlist *src_sg,
+ unsigned int src_nents, unsigned long flags)
{
struct caam_dma_edesc *edesc;
/* allocate extended descriptor */
- edesc = caam_dma_edesc_alloc(chan, flags, dst_sg, dst_nents, src_sg,
- src_nents);
+ edesc = caam_dma_sg_edesc_alloc(chan, dst_sg, dst_nents, src_sg,
+ src_nents, flags);
if (IS_ERR_OR_NULL(edesc))
return ERR_CAST(edesc);
/* Initialize job descriptor */
- init_dma_job(dma_sh_desc->desc_dma, edesc);
+ caam_dma_sg_init_job_desc(edesc);
+
+ return &edesc->async_tx;
+}
+
+static void caam_dma_memcpy_init_job_desc(struct caam_dma_edesc *edesc)
+{
+ u32 *jd = edesc->jd;
+ u32 *sh_desc = dma_sh_desc->desc;
+ dma_addr_t desc_dma = dma_sh_desc->desc_dma;
+
+ /* init the job descriptor */
+ init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE);
+
+ /* set SEQIN PTR */
+ append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, 0);
+
+ /* set SEQOUT PTR */
+ append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, 0);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "caam dma desc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, jd, desc_bytes(jd), 1);
+#endif
+}
+
+static struct dma_async_tx_descriptor *
+caam_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct caam_dma_edesc *edesc;
+ struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
+ chan);
+
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | GFP_NOWAIT);
+ if (!edesc)
+ return ERR_PTR(-ENOMEM);
+
+ dma_async_tx_descriptor_init(&edesc->async_tx, chan);
+ edesc->async_tx.tx_submit = caam_dma_tx_submit;
+ edesc->async_tx.flags = flags;
+ edesc->async_tx.cookie = -EBUSY;
+
+ edesc->src_dma = src;
+ edesc->src_len = len;
+ edesc->dst_dma = dst;
+ edesc->dst_len = len;
+ edesc->ctx = ctx;
+
+ caam_dma_memcpy_init_job_desc(edesc);
return &edesc->async_tx;
}
/* This function can be called in an interrupt context */
-static void caam_jr_issue_pending(struct dma_chan *chan)
+static void caam_dma_issue_pending(struct dma_chan *chan)
{
struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
chan);
@@ -273,7 +319,7 @@ static void caam_jr_issue_pending(struct dma_chan *chan)
spin_unlock_bh(&ctx->edesc_lock);
}
-static void caam_jr_free_chan_resources(struct dma_chan *chan)
+static void caam_dma_free_chan_resources(struct dma_chan *chan)
{
struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
chan);
@@ -439,11 +485,13 @@ static int __init caam_dma_probe(struct platform_device *pdev)
dma_dev->dev = dev;
dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
dma_cap_set(DMA_SG, dma_dev->cap_mask);
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
dma_dev->device_tx_status = dma_cookie_status;
- dma_dev->device_issue_pending = caam_jr_issue_pending;
- dma_dev->device_prep_dma_sg = caam_jr_prep_dma_sg;
- dma_dev->device_free_chan_resources = caam_jr_free_chan_resources;
+ dma_dev->device_issue_pending = caam_dma_issue_pending;
+ dma_dev->device_prep_dma_sg = caam_dma_prep_sg;
+ dma_dev->device_prep_dma_memcpy = caam_dma_prep_memcpy;
+ dma_dev->device_free_chan_resources = caam_dma_free_chan_resources;
err = dma_async_device_register(dma_dev);
if (err) {
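
Note: the caam_dma.c hunks above advertise DMA_MEMCPY next to DMA_SG and wire up device_prep_dma_memcpy, so a regular dmaengine client can now drive the CAAM job ring as a plain memcpy engine. Below is a minimal, hedged sketch of such a client; example_caam_memcpy() and its error handling are illustrative only and not part of this patch.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Illustrative client: copy 'len' bytes from 'src' to 'dst' (both already
 * DMA-mapped) through any channel advertising DMA_MEMCPY, e.g. caam_dma. */
static int example_caam_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* Grab any channel that can do memcpy (private channels included). */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* Completion would normally be observed via tx->callback or
	 * dma_sync_wait() before releasing the channel; omitted for brevity. */
	dma_release_channel(chan);
	return dma_submit_error(cookie);
}
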
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 6c4c281..60cd526 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -119,67 +119,111 @@
u64 pre_addr, pre_queue;
+/* qDMA Command Descriptor Formats */
+
+/* Compound Command Descriptor Format */
struct fsl_qdma_ccdf {
- u8 status;
- u32 rev1:22;
- u32 ser:1;
- u32 rev2:1;
- u32 rev3:20;
- u32 offset:9;
- u32 format:3;
+ __le32 status; /* ser, status */
+ __le32 cfg; /* format, offset */
union {
struct {
- u32 addr_lo; /* low 32-bits of 40-bit address */
- u32 addr_hi:8; /* high 8-bits of 40-bit address */
- u32 rev4:16;
- u32 queue:3;
- u32 rev5:3;
- u32 dd:2; /* dynamic debug */
- };
- struct {
- u64 addr:40;
- /* More efficient address accessor */
- u64 __notaddress:24;
- };
+ __le32 addr_lo; /* low 32-bits of 40-bit address */
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u8 __reserved1[2];
+ u8 cfg8b_w1; /* dd, queue */
+ } __packed;
+ __le64 data;
};
} __packed;
+#define QDMA_CCDF_STATUS 20
+#define QDMA_CCDF_OFFSET 20
+#define QDMA_CCDF_MASK GENMASK(28, 20)
+#define QDMA_CCDF_FOTMAT BIT(29)
+#define QDMA_CCDF_SER BIT(30)
+
+static inline u64 qdma_ccdf_addr_get64(const struct fsl_qdma_ccdf *ccdf)
+{
+ return le64_to_cpu(ccdf->data) & 0xffffffffffLLU;
+}
+static inline u64 qdma_ccdf_get_queue(const struct fsl_qdma_ccdf *ccdf)
+{
+ return ccdf->cfg8b_w1 & 0xff;
+}
+static inline void qdma_ccdf_addr_set64(struct fsl_qdma_ccdf *ccdf, u64 addr)
+{
+ ccdf->addr_hi = upper_32_bits(addr);
+ ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
+}
+static inline int qdma_ccdf_get_offset(const struct fsl_qdma_ccdf *ccdf)
+{
+ return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
+}
+static inline void qdma_ccdf_set_format(struct fsl_qdma_ccdf *ccdf, int offset)
+{
+ ccdf->cfg = cpu_to_le32(QDMA_CCDF_FOTMAT | offset);
+}
+static inline int qdma_ccdf_get_status(const struct fsl_qdma_ccdf *ccdf)
+{
+ return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS;
+}
+static inline void qdma_ccdf_set_ser(struct fsl_qdma_ccdf *ccdf, int status)
+{
+ ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
+}
+/* qDMA Compound S/G Format */
struct fsl_qdma_csgf {
- u32 offset:13;
- u32 rev1:19;
- u32 length:30;
- u32 f:1;
- u32 e:1;
+ __le32 offset; /* offset */
+ __le32 cfg; /* E bit, F bit, length */
union {
struct {
- u32 addr_lo; /* low 32-bits of 40-bit address */
- u32 addr_hi:8; /* high 8-bits of 40-bit address */
- u32 rev2:24;
- };
- struct {
- u64 addr:40;
- /* More efficient address accessor */
- u64 __notaddress:24;
+ __le32 addr_lo; /* low 32-bits of 40-bit address */
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u8 __reserved1[3];
};
+ __le64 data;
};
} __packed;
+#define QDMA_SG_FIN BIT(30)
+#define QDMA_SG_EXT BIT(31)
+#define QDMA_SG_LEN_MASK GENMASK(29, 0)
+static inline u64 qdma_csgf_addr_get64(const struct fsl_qdma_csgf *sg)
+{
+ return be64_to_cpu(sg->data) & 0xffffffffffLLU;
+}
+static inline void qdma_csgf_addr_set64(struct fsl_qdma_csgf *sg, u64 addr)
+{
+ sg->addr_hi = upper_32_bits(addr);
+ sg->addr_lo = cpu_to_le32(lower_32_bits(addr));
+}
+static inline void qdma_csgf_set_len(struct fsl_qdma_csgf *csgf, int len)
+{
+ csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
+}
+static inline void qdma_csgf_set_f(struct fsl_qdma_csgf *csgf, int len)
+{
+ csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
+}
+static inline void qdma_csgf_set_e(struct fsl_qdma_csgf *csgf, int len)
+{
+ csgf->cfg = cpu_to_le32(QDMA_SG_EXT | (len & QDMA_SG_LEN_MASK));
+}
+
+/* qDMA Source Descriptor Format */
struct fsl_qdma_sdf {
- u32 rev3:32;
- u32 ssd:12; /* souce stride distance */
- u32 sss:12; /* souce stride size */
- u32 rev4:8;
- u32 rev5:32;
- u32 cmd;
+ __le32 rev3;
+ __le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] - sss */
+ __le32 rev5;
+ __le32 cmd;
} __packed;
+/* qDMA Destination Descriptor Format */
struct fsl_qdma_ddf {
- u32 rev1:32;
- u32 dsd:12; /* Destination stride distance */
- u32 dss:12; /* Destination stride size */
- u32 rev2:8;
- u32 rev3:32;
- u32 cmd;
+ __le32 rev1;
+ __le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
+ __le32 rev3;
+ __le32 cmd;
} __packed;
struct fsl_qdma_chan {
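
Note: the hunk above replaces the endian-unsafe C bitfields with explicit __le32/u8 fields plus accessor helpers; the 40-bit bus address is split into a little-endian 32-bit addr_lo word and an 8-bit addr_hi byte. A standalone, host-endian illustration of that split follows; the struct and function names are illustrative, and the kernel helpers additionally byte-swap addr_lo with cpu_to_le32()/le64_to_cpu().

#include <stdint.h>
#include <stdio.h>

struct addr40 {
	uint32_t addr_lo;	/* low 32 bits of the 40-bit bus address */
	uint8_t  addr_hi;	/* high 8 bits of the 40-bit bus address */
	uint8_t  pad[3];	/* reserved, keeps the address field 8 bytes wide */
};

static void addr40_set(struct addr40 *a, uint64_t addr)
{
	a->addr_lo = (uint32_t)addr;		/* lower_32_bits(addr) */
	a->addr_hi = (uint8_t)(addr >> 32);	/* upper_32_bits(addr), truncated to 8 bits */
}

static uint64_t addr40_get(const struct addr40 *a)
{
	/* Equivalent to reading the union's 64-bit 'data' word and masking
	 * it down to 40 bits, as qdma_ccdf_addr_get64() does. */
	return ((uint64_t)a->addr_hi << 32) | a->addr_lo;
}

int main(void)
{
	struct addr40 a;

	addr40_set(&a, 0x12345678abULL);
	printf("0x%llx\n", (unsigned long long)addr40_get(&a));	/* prints 0x12345678ab */
	return 0;
}
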
@@ -304,24 +348,27 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
/* Head Command Descriptor(Frame Descriptor) */
- ccdf->addr = fsl_comp->bus_addr + 16;
- ccdf->format = 1; /* Compound S/G format */
+ qdma_ccdf_addr_set64(ccdf, fsl_comp->bus_addr + 16);
+ qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
+ qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
/* Status notification is enqueued to status queue. */
- ccdf->ser = 1;
/* Compound Command Descriptor(Frame List Table) */
- csgf_desc->addr = fsl_comp->bus_addr + 64;
+ qdma_csgf_addr_set64(csgf_desc, fsl_comp->bus_addr + 64);
/* It must be 32 as Compound S/G Descriptor */
- csgf_desc->length = 32;
- csgf_src->addr = src;
- csgf_src->length = len;
- csgf_dest->addr = dst;
- csgf_dest->length = len;
+ qdma_csgf_set_len(csgf_desc, 32);
+ qdma_csgf_addr_set64(csgf_src, src);
+ qdma_csgf_set_len(csgf_src, len);
+ qdma_csgf_addr_set64(csgf_dest, dst);
+ qdma_csgf_set_len(csgf_dest, len);
/* This entry is the last entry. */
- csgf_dest->f = FSL_QDMA_F_LAST_ENTRY;
+ qdma_csgf_set_f(csgf_dest, len);
/* Descriptor Buffer */
- sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
- ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
- ddf->cmd |= FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET;
+ sdf->cmd = cpu_to_le32(
+ FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
+ ddf->cmd = cpu_to_le32(
+ FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
+ ddf->cmd |= cpu_to_le32(
+ FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
}
static void fsl_qdma_comp_fill_sg(
@@ -345,49 +392,48 @@ static void fsl_qdma_comp_fill_sg(
csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
-
memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
/* Head Command Descriptor(Frame Descriptor) */
- ccdf->addr = fsl_comp->bus_addr + 16;
- ccdf->format = 1; /* Compound S/G format */
+ qdma_ccdf_addr_set64(ccdf, fsl_comp->bus_addr + 16);
+ qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
/* Status notification is enqueued to status queue. */
- ccdf->ser = 1;
+ qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
/* Compound Command Descriptor(Frame List Table) */
- csgf_desc->addr = fsl_comp->bus_addr + 64;
+ qdma_csgf_addr_set64(csgf_desc, fsl_comp->bus_addr + 64);
/* It must be 32 as Compound S/G Descriptor */
- csgf_desc->length = 32;
+ qdma_csgf_set_len(csgf_desc, 32);
sg_block = fsl_comp->sg_block;
- csgf_src->addr = sg_block->bus_addr;
+ qdma_csgf_addr_set64(csgf_src, sg_block->bus_addr);
/* This entry link to the s/g entry. */
- csgf_src->e = FSL_QDMA_E_SG_TABLE;
+ qdma_csgf_set_e(csgf_src, 32);
temp = sg_block + fsl_comp->sg_block_src;
- csgf_dest->addr = temp->bus_addr;
+ qdma_csgf_addr_set64(csgf_dest, temp->bus_addr);
/* This entry is the last entry. */
- csgf_dest->f = FSL_QDMA_F_LAST_ENTRY;
+ qdma_csgf_set_f(csgf_dest, 32);
/* This entry link to the s/g entry. */
- csgf_dest->e = FSL_QDMA_E_SG_TABLE;
+ qdma_csgf_set_e(csgf_dest, 32);
for_each_sg(src_sg, sg, src_nents, i) {
temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
- csgf_sg->addr = sg_dma_address(sg);
- csgf_sg->length = sg_dma_len(sg);
+ qdma_csgf_addr_set64(csgf_sg, sg_dma_address(sg));
+ qdma_csgf_set_len(csgf_sg, sg_dma_len(sg));
total_src_len += sg_dma_len(sg);
if (i == src_nents - 1)
- csgf_sg->f = FSL_QDMA_F_LAST_ENTRY;
+ qdma_csgf_set_f(csgf_sg, sg_dma_len(sg));
if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
temp = sg_block +
i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
- csgf_sg->addr = temp->bus_addr;
- csgf_sg->e = FSL_QDMA_E_SG_TABLE;
+ qdma_csgf_addr_set64(csgf_sg, temp->bus_addr);
+ qdma_csgf_set_e(csgf_sg, sg_dma_len(sg));
}
}
@@ -396,20 +442,20 @@ static void fsl_qdma_comp_fill_sg(
temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
- csgf_sg->addr = sg_dma_address(sg);
- csgf_sg->length = sg_dma_len(sg);
+ qdma_csgf_addr_set64(csgf_sg, sg_dma_address(sg));
+ qdma_csgf_set_len(csgf_sg, sg_dma_len(sg));
total_dst_len += sg_dma_len(sg);
if (i == dst_nents - 1)
- csgf_sg->f = FSL_QDMA_F_LAST_ENTRY;
+ qdma_csgf_set_f(csgf_sg, sg_dma_len(sg));
if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
temp = sg_block +
i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
- csgf_sg->addr = temp->bus_addr;
- csgf_sg->e = FSL_QDMA_E_SG_TABLE;
+ qdma_csgf_addr_set64(csgf_sg, temp->bus_addr);
+ qdma_csgf_set_e(csgf_sg, sg_dma_len(sg));
}
}
@@ -417,12 +463,10 @@ static void fsl_qdma_comp_fill_sg(
dev_err(&fsl_comp->qchan->vchan.chan.dev->device,
"The data length for src and dst isn't match.\n");
- csgf_src->length = total_src_len;
- csgf_dest->length = total_dst_len;
+ qdma_csgf_set_len(csgf_src, total_src_len);
+ qdma_csgf_set_len(csgf_dest, total_dst_len);
/* Descriptor Buffer */
- sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
- ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
}
/*
@@ -694,13 +738,12 @@ static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
if (reg & FSL_QDMA_BSQSR_QE)
return 0;
status_addr = fsl_status->virt_head;
- if (status_addr->queue == pre_queue &&
- status_addr->addr == pre_addr)
+ if (qdma_ccdf_get_queue(status_addr) == pre_queue &&
+ qdma_ccdf_addr_get64(status_addr) == pre_addr)
duplicate = 1;
-
- i = status_addr->queue;
- pre_queue = status_addr->queue;
- pre_addr = status_addr->addr;
+ i = qdma_ccdf_get_queue(status_addr);
+ pre_queue = qdma_ccdf_get_queue(status_addr);
+ pre_addr = qdma_ccdf_addr_get64(status_addr);
temp_queue = fsl_queue + i;
spin_lock(&temp_queue->queue_lock);
if (list_empty(&temp_queue->comp_used)) {
@@ -716,8 +759,7 @@ static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
list);
csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr
+ 2;
- if (fsl_comp->bus_addr + 16 !=
- (dma_addr_t)status_addr->addr) {
+ if (fsl_comp->bus_addr + 16 != pre_addr) {
if (duplicate)
duplicate_handle = 1;
else {
@@ -730,7 +772,7 @@ static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
if (duplicate_handle) {
reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
reg |= FSL_QDMA_BSQMR_DI;
- status_addr->addr = 0x0;
+ qdma_ccdf_addr_set64(status_addr, 0x0);
fsl_status->virt_head++;
if (fsl_status->virt_head == fsl_status->cq
+ fsl_status->n_cq)
@@ -743,7 +785,7 @@ static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
reg |= FSL_QDMA_BSQMR_DI;
- status_addr->addr = 0x0;
+ qdma_ccdf_addr_set64(status_addr, 0x0);
fsl_status->virt_head++;
if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
fsl_status->virt_head = fsl_status->cq;
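
Note: after this patch, fsl_qdma_queue_transfer_complete() reads the queue id and completed address through qdma_ccdf_get_queue()/qdma_ccdf_addr_get64(), compares them with the previously seen pair to spot a duplicated status write, then clears the address and advances virt_head with wrap-around. The following standalone sketch only restates that consumption pattern; the types and names are illustrative, not the driver's own.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct status_entry {
	uint64_t addr;		/* completed compound descriptor bus address */
	unsigned int queue;	/* originating command queue id */
};

struct status_ring {
	struct status_entry *cq;	/* base of the status ring */
	struct status_entry *virt_head;	/* next entry to consume */
	size_t n_cq;			/* number of entries in the ring */
	uint64_t pre_addr;		/* address of the last consumed entry */
	unsigned int pre_queue;		/* queue id of the last consumed entry */
};

/* Consume one status entry; returns true if it duplicated the previous one. */
static bool status_ring_pop(struct status_ring *r)
{
	struct status_entry *e = r->virt_head;
	bool duplicate = (e->queue == r->pre_queue && e->addr == r->pre_addr);

	r->pre_queue = e->queue;
	r->pre_addr = e->addr;

	e->addr = 0;			/* mark the slot as consumed */
	if (++r->virt_head == r->cq + r->n_cq)
		r->virt_head = r->cq;	/* wrap back to the start of the ring */

	return duplicate;
}
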