From 458f5884a1972f26c5260539822b0d158dd85e92 Mon Sep 17 00:00:00 2001 From: Andy Gross Date: Mon, 29 Feb 2016 17:15:19 -0600 Subject: dmaengine: qcom_bam_dma: Make driver work for BE This patch fixes the Qualcomm BAM dmaenging driver to work with big endian kernels. Signed-off-by: Andy Gross Signed-off-by: Vinod Koul diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c index 5a250cd..37f7aec 100644 --- a/drivers/dma/qcom_bam_dma.c +++ b/drivers/dma/qcom_bam_dma.c @@ -53,9 +53,9 @@ #include "virt-dma.h" struct bam_desc_hw { - u32 addr; /* Buffer physical address */ - u16 size; /* Buffer size in bytes */ - u16 flags; + __le32 addr; /* Buffer physical address */ + __le16 size; /* Buffer size in bytes */ + __le16 flags; }; #define DESC_FLAG_INT BIT(15) @@ -632,14 +632,15 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, unsigned int curr_offset = 0; do { - desc->addr = sg_dma_address(sg) + curr_offset; + desc->addr = cpu_to_le32(sg_dma_address(sg) + + curr_offset); if (remainder > BAM_MAX_DATA_SIZE) { - desc->size = BAM_MAX_DATA_SIZE; + desc->size = cpu_to_le16(BAM_MAX_DATA_SIZE); remainder -= BAM_MAX_DATA_SIZE; curr_offset += BAM_MAX_DATA_SIZE; } else { - desc->size = remainder; + desc->size = cpu_to_le16(remainder); remainder = 0; } @@ -915,9 +916,11 @@ static void bam_start_dma(struct bam_chan *bchan) /* set any special flags on the last descriptor */ if (async_desc->num_desc == async_desc->xfer_len) - desc[async_desc->xfer_len - 1].flags = async_desc->flags; + desc[async_desc->xfer_len - 1].flags = + cpu_to_le16(async_desc->flags); else - desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT; + desc[async_desc->xfer_len - 1].flags |= + cpu_to_le16(DESC_FLAG_INT); if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) { u32 partial = MAX_DESCRIPTORS - bchan->tail; -- cgit v0.10.2 From d9b31efcbf41cd249419bca82065976ca44e3ae9 Mon Sep 17 00:00:00 2001 From: Sinan Kaya Date: Thu, 4 Feb 2016 23:34:32 -0500 Subject: dmaengine: qcom_bam_dma: move to qcom directory Creating a QCOM directory for all QCOM DMA source files. Signed-off-by: Sinan Kaya Reviewed-by: Andy Gross Signed-off-by: Vinod Koul diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 79b1390..245b061 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -408,15 +408,6 @@ config PXA_DMA 16 to 32 channels for peripheral to memory or memory to memory transfers. -config QCOM_BAM_DMA - tristate "QCOM BAM DMA support" - depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM) - select DMA_ENGINE - select DMA_VIRTUAL_CHANNELS - ---help--- - Enable support for the QCOM BAM DMA controller. This controller - provides DMA capabilities for a variety of on-chip devices. 
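For context on how the controller is used once this option is enabled: peripheral drivers never program BAM registers themselves; they obtain a channel through the generic dmaengine slave API, with the wiring described in the device tree. The sketch below is illustrative only and is not part of this patch series (the "rx" dma-names entry, the example_* names and the maxburst value are hypothetical):

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Completion callback, run when the BAM raises the transfer-done interrupt. */
static void example_dma_done(void *arg)
{
	complete(arg);
}

static int example_start_rx(struct device *dev, struct scatterlist *sgl,
			    int nents, struct completion *done)
{
	struct dma_chan *chan;
	struct dma_slave_config cfg = {
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 4,	/* ends up in BAM_DESC_CNT_TRSHLD */
	};
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;
	int ret;

	/* BAM pipes are hard wired to their peripheral, so no addresses here */
	chan = dma_request_slave_channel(dev, "rx");
	if (!chan)
		return -ENODEV;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto err_release;

	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
				      DMA_PREP_INTERRUPT);
	if (!txd) {
		ret = -ENOMEM;
		goto err_release;
	}

	txd->callback = example_dma_done;
	txd->callback_param = done;

	cookie = dmaengine_submit(txd);
	dma_async_issue_pending(chan);

	return dma_submit_error(cookie);

err_release:
	dma_release_channel(chan);
	return ret;
}

On the device-tree side the consumer's "dmas" phandle carries a single cell, the pipe number, which bam_dma_xlate() in the driver below translates into the matching channel.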
- config SIRF_DMA tristate "CSR SiRFprimaII/SiRFmarco DMA support" depends on ARCH_SIRF @@ -539,6 +530,8 @@ config ZX_DMA # driver files source "drivers/dma/bestcomm/Kconfig" +source "drivers/dma/qcom/Kconfig" + source "drivers/dma/dw/Kconfig" source "drivers/dma/hsu/Kconfig" diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 2dd0a067..6084127 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -52,7 +52,6 @@ obj-$(CONFIG_PCH_DMA) += pch_dma.o obj-$(CONFIG_PL330_DMA) += pl330.o obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ obj-$(CONFIG_PXA_DMA) += pxa_dma.o -obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o obj-$(CONFIG_RENESAS_DMA) += sh/ obj-$(CONFIG_SIRF_DMA) += sirf-dma.o obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o @@ -67,4 +66,5 @@ obj-$(CONFIG_TI_EDMA) += edma.o obj-$(CONFIG_XGENE_DMA) += xgene-dma.o obj-$(CONFIG_ZX_DMA) += zx296702_dma.o +obj-y += qcom/ obj-y += xilinx/ diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig new file mode 100644 index 0000000..f17c272 --- /dev/null +++ b/drivers/dma/qcom/Kconfig @@ -0,0 +1,8 @@ +config QCOM_BAM_DMA + tristate "QCOM BAM DMA support" + depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM) + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + ---help--- + Enable support for the QCOM BAM DMA controller. This controller + provides DMA capabilities for a variety of on-chip devices. diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile new file mode 100644 index 0000000..f612ae3 --- /dev/null +++ b/drivers/dma/qcom/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c new file mode 100644 index 0000000..2d691a3 --- /dev/null +++ b/drivers/dma/qcom/bam_dma.c @@ -0,0 +1,1262 @@ +/* + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +/* + * QCOM BAM DMA engine driver + * + * QCOM BAM DMA blocks are distributed amongst a number of the on-chip + * peripherals on the MSM 8x74. The configuration of the channels are dependent + * on the way they are hard wired to that specific peripheral. The peripheral + * device tree entries specify the configuration of each channel. + * + * The DMA controller requires the use of external memory for storage of the + * hardware descriptors for each channel. The descriptor FIFO is accessed as a + * circular buffer and operations are managed according to the offset within the + * FIFO. After pipe/channel reset, all of the pipe registers and internal state + * are back to defaults. + * + * During DMA operations, we write descriptors to the FIFO, being careful to + * handle wrapping and then write the last FIFO offset to that channel's + * P_EVNT_REG register to kick off the transaction. The P_SW_OFSTS register + * indicates the current FIFO offset that is being processed, so there is some + * indication of where the hardware is currently working. 
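 *
 * As a concrete illustration of the offsets involved: the driver below uses a
 * 32 KB descriptor FIFO and 8-byte hardware descriptors, so each channel can
 * have up to MAX_DESCRIPTORS (4095) entries outstanding; one slot is given up
 * so the FIFO base can be aligned to the 8-byte descriptor size. After
 * queueing work, bam_start_dma() writes tail * sizeof(struct bam_desc_hw) to
 * P_EVNT_REG, and P_SW_OFSTS advances as the hardware consumes the entries.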
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../dmaengine.h" +#include "../virt-dma.h" + +struct bam_desc_hw { + __le32 addr; /* Buffer physical address */ + __le16 size; /* Buffer size in bytes */ + __le16 flags; +}; + +#define DESC_FLAG_INT BIT(15) +#define DESC_FLAG_EOT BIT(14) +#define DESC_FLAG_EOB BIT(13) +#define DESC_FLAG_NWD BIT(12) + +struct bam_async_desc { + struct virt_dma_desc vd; + + u32 num_desc; + u32 xfer_len; + + /* transaction flags, EOT|EOB|NWD */ + u16 flags; + + struct bam_desc_hw *curr_desc; + + enum dma_transfer_direction dir; + size_t length; + struct bam_desc_hw desc[0]; +}; + +enum bam_reg { + BAM_CTRL, + BAM_REVISION, + BAM_NUM_PIPES, + BAM_DESC_CNT_TRSHLD, + BAM_IRQ_SRCS, + BAM_IRQ_SRCS_MSK, + BAM_IRQ_SRCS_UNMASKED, + BAM_IRQ_STTS, + BAM_IRQ_CLR, + BAM_IRQ_EN, + BAM_CNFG_BITS, + BAM_IRQ_SRCS_EE, + BAM_IRQ_SRCS_MSK_EE, + BAM_P_CTRL, + BAM_P_RST, + BAM_P_HALT, + BAM_P_IRQ_STTS, + BAM_P_IRQ_CLR, + BAM_P_IRQ_EN, + BAM_P_EVNT_DEST_ADDR, + BAM_P_EVNT_REG, + BAM_P_SW_OFSTS, + BAM_P_DATA_FIFO_ADDR, + BAM_P_DESC_FIFO_ADDR, + BAM_P_EVNT_GEN_TRSHLD, + BAM_P_FIFO_SIZES, +}; + +struct reg_offset_data { + u32 base_offset; + unsigned int pipe_mult, evnt_mult, ee_mult; +}; + +static const struct reg_offset_data bam_v1_3_reg_info[] = { + [BAM_CTRL] = { 0x0F80, 0x00, 0x00, 0x00 }, + [BAM_REVISION] = { 0x0F84, 0x00, 0x00, 0x00 }, + [BAM_NUM_PIPES] = { 0x0FBC, 0x00, 0x00, 0x00 }, + [BAM_DESC_CNT_TRSHLD] = { 0x0F88, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS] = { 0x0F8C, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS_MSK] = { 0x0F90, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS_UNMASKED] = { 0x0FB0, 0x00, 0x00, 0x00 }, + [BAM_IRQ_STTS] = { 0x0F94, 0x00, 0x00, 0x00 }, + [BAM_IRQ_CLR] = { 0x0F98, 0x00, 0x00, 0x00 }, + [BAM_IRQ_EN] = { 0x0F9C, 0x00, 0x00, 0x00 }, + [BAM_CNFG_BITS] = { 0x0FFC, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS_EE] = { 0x1800, 0x00, 0x00, 0x80 }, + [BAM_IRQ_SRCS_MSK_EE] = { 0x1804, 0x00, 0x00, 0x80 }, + [BAM_P_CTRL] = { 0x0000, 0x80, 0x00, 0x00 }, + [BAM_P_RST] = { 0x0004, 0x80, 0x00, 0x00 }, + [BAM_P_HALT] = { 0x0008, 0x80, 0x00, 0x00 }, + [BAM_P_IRQ_STTS] = { 0x0010, 0x80, 0x00, 0x00 }, + [BAM_P_IRQ_CLR] = { 0x0014, 0x80, 0x00, 0x00 }, + [BAM_P_IRQ_EN] = { 0x0018, 0x80, 0x00, 0x00 }, + [BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x40, 0x00 }, + [BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x40, 0x00 }, + [BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x40, 0x00 }, + [BAM_P_DATA_FIFO_ADDR] = { 0x1024, 0x00, 0x40, 0x00 }, + [BAM_P_DESC_FIFO_ADDR] = { 0x101C, 0x00, 0x40, 0x00 }, + [BAM_P_EVNT_GEN_TRSHLD] = { 0x1028, 0x00, 0x40, 0x00 }, + [BAM_P_FIFO_SIZES] = { 0x1020, 0x00, 0x40, 0x00 }, +}; + +static const struct reg_offset_data bam_v1_4_reg_info[] = { + [BAM_CTRL] = { 0x0000, 0x00, 0x00, 0x00 }, + [BAM_REVISION] = { 0x0004, 0x00, 0x00, 0x00 }, + [BAM_NUM_PIPES] = { 0x003C, 0x00, 0x00, 0x00 }, + [BAM_DESC_CNT_TRSHLD] = { 0x0008, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS] = { 0x000C, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS_MSK] = { 0x0010, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS_UNMASKED] = { 0x0030, 0x00, 0x00, 0x00 }, + [BAM_IRQ_STTS] = { 0x0014, 0x00, 0x00, 0x00 }, + [BAM_IRQ_CLR] = { 0x0018, 0x00, 0x00, 0x00 }, + [BAM_IRQ_EN] = { 0x001C, 0x00, 0x00, 0x00 }, + [BAM_CNFG_BITS] = { 0x007C, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS_EE] = { 0x0800, 0x00, 0x00, 0x80 }, + [BAM_IRQ_SRCS_MSK_EE] = { 0x0804, 0x00, 0x00, 0x80 }, + [BAM_P_CTRL] = { 0x1000, 0x1000, 0x00, 0x00 }, + [BAM_P_RST] = { 0x1004, 0x1000, 
0x00, 0x00 }, + [BAM_P_HALT] = { 0x1008, 0x1000, 0x00, 0x00 }, + [BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 }, + [BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 }, + [BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 }, + [BAM_P_EVNT_DEST_ADDR] = { 0x182C, 0x00, 0x1000, 0x00 }, + [BAM_P_EVNT_REG] = { 0x1818, 0x00, 0x1000, 0x00 }, + [BAM_P_SW_OFSTS] = { 0x1800, 0x00, 0x1000, 0x00 }, + [BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 }, + [BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 }, + [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 }, + [BAM_P_FIFO_SIZES] = { 0x1820, 0x00, 0x1000, 0x00 }, +}; + +static const struct reg_offset_data bam_v1_7_reg_info[] = { + [BAM_CTRL] = { 0x00000, 0x00, 0x00, 0x00 }, + [BAM_REVISION] = { 0x01000, 0x00, 0x00, 0x00 }, + [BAM_NUM_PIPES] = { 0x01008, 0x00, 0x00, 0x00 }, + [BAM_DESC_CNT_TRSHLD] = { 0x00008, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS] = { 0x03010, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS_MSK] = { 0x03014, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS_UNMASKED] = { 0x03018, 0x00, 0x00, 0x00 }, + [BAM_IRQ_STTS] = { 0x00014, 0x00, 0x00, 0x00 }, + [BAM_IRQ_CLR] = { 0x00018, 0x00, 0x00, 0x00 }, + [BAM_IRQ_EN] = { 0x0001C, 0x00, 0x00, 0x00 }, + [BAM_CNFG_BITS] = { 0x0007C, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS_EE] = { 0x03000, 0x00, 0x00, 0x1000 }, + [BAM_IRQ_SRCS_MSK_EE] = { 0x03004, 0x00, 0x00, 0x1000 }, + [BAM_P_CTRL] = { 0x13000, 0x1000, 0x00, 0x00 }, + [BAM_P_RST] = { 0x13004, 0x1000, 0x00, 0x00 }, + [BAM_P_HALT] = { 0x13008, 0x1000, 0x00, 0x00 }, + [BAM_P_IRQ_STTS] = { 0x13010, 0x1000, 0x00, 0x00 }, + [BAM_P_IRQ_CLR] = { 0x13014, 0x1000, 0x00, 0x00 }, + [BAM_P_IRQ_EN] = { 0x13018, 0x1000, 0x00, 0x00 }, + [BAM_P_EVNT_DEST_ADDR] = { 0x1382C, 0x00, 0x1000, 0x00 }, + [BAM_P_EVNT_REG] = { 0x13818, 0x00, 0x1000, 0x00 }, + [BAM_P_SW_OFSTS] = { 0x13800, 0x00, 0x1000, 0x00 }, + [BAM_P_DATA_FIFO_ADDR] = { 0x13824, 0x00, 0x1000, 0x00 }, + [BAM_P_DESC_FIFO_ADDR] = { 0x1381C, 0x00, 0x1000, 0x00 }, + [BAM_P_EVNT_GEN_TRSHLD] = { 0x13828, 0x00, 0x1000, 0x00 }, + [BAM_P_FIFO_SIZES] = { 0x13820, 0x00, 0x1000, 0x00 }, +}; + +/* BAM CTRL */ +#define BAM_SW_RST BIT(0) +#define BAM_EN BIT(1) +#define BAM_EN_ACCUM BIT(4) +#define BAM_TESTBUS_SEL_SHIFT 5 +#define BAM_TESTBUS_SEL_MASK 0x3F +#define BAM_DESC_CACHE_SEL_SHIFT 13 +#define BAM_DESC_CACHE_SEL_MASK 0x3 +#define BAM_CACHED_DESC_STORE BIT(15) +#define IBC_DISABLE BIT(16) + +/* BAM REVISION */ +#define REVISION_SHIFT 0 +#define REVISION_MASK 0xFF +#define NUM_EES_SHIFT 8 +#define NUM_EES_MASK 0xF +#define CE_BUFFER_SIZE BIT(13) +#define AXI_ACTIVE BIT(14) +#define USE_VMIDMT BIT(15) +#define SECURED BIT(16) +#define BAM_HAS_NO_BYPASS BIT(17) +#define HIGH_FREQUENCY_BAM BIT(18) +#define INACTIV_TMRS_EXST BIT(19) +#define NUM_INACTIV_TMRS BIT(20) +#define DESC_CACHE_DEPTH_SHIFT 21 +#define DESC_CACHE_DEPTH_1 (0 << DESC_CACHE_DEPTH_SHIFT) +#define DESC_CACHE_DEPTH_2 (1 << DESC_CACHE_DEPTH_SHIFT) +#define DESC_CACHE_DEPTH_3 (2 << DESC_CACHE_DEPTH_SHIFT) +#define DESC_CACHE_DEPTH_4 (3 << DESC_CACHE_DEPTH_SHIFT) +#define CMD_DESC_EN BIT(23) +#define INACTIV_TMR_BASE_SHIFT 24 +#define INACTIV_TMR_BASE_MASK 0xFF + +/* BAM NUM PIPES */ +#define BAM_NUM_PIPES_SHIFT 0 +#define BAM_NUM_PIPES_MASK 0xFF +#define PERIPH_NON_PIPE_GRP_SHIFT 16 +#define PERIPH_NON_PIP_GRP_MASK 0xFF +#define BAM_NON_PIPE_GRP_SHIFT 24 +#define BAM_NON_PIPE_GRP_MASK 0xFF + +/* BAM CNFG BITS */ +#define BAM_PIPE_CNFG BIT(2) +#define BAM_FULL_PIPE BIT(11) +#define BAM_NO_EXT_P_RST BIT(12) +#define BAM_IBC_DISABLE BIT(13) +#define BAM_SB_CLK_REQ BIT(14) 
+#define BAM_PSM_CSW_REQ BIT(15) +#define BAM_PSM_P_RES BIT(16) +#define BAM_AU_P_RES BIT(17) +#define BAM_SI_P_RES BIT(18) +#define BAM_WB_P_RES BIT(19) +#define BAM_WB_BLK_CSW BIT(20) +#define BAM_WB_CSW_ACK_IDL BIT(21) +#define BAM_WB_RETR_SVPNT BIT(22) +#define BAM_WB_DSC_AVL_P_RST BIT(23) +#define BAM_REG_P_EN BIT(24) +#define BAM_PSM_P_HD_DATA BIT(25) +#define BAM_AU_ACCUMED BIT(26) +#define BAM_CMD_ENABLE BIT(27) + +#define BAM_CNFG_BITS_DEFAULT (BAM_PIPE_CNFG | \ + BAM_NO_EXT_P_RST | \ + BAM_IBC_DISABLE | \ + BAM_SB_CLK_REQ | \ + BAM_PSM_CSW_REQ | \ + BAM_PSM_P_RES | \ + BAM_AU_P_RES | \ + BAM_SI_P_RES | \ + BAM_WB_P_RES | \ + BAM_WB_BLK_CSW | \ + BAM_WB_CSW_ACK_IDL | \ + BAM_WB_RETR_SVPNT | \ + BAM_WB_DSC_AVL_P_RST | \ + BAM_REG_P_EN | \ + BAM_PSM_P_HD_DATA | \ + BAM_AU_ACCUMED | \ + BAM_CMD_ENABLE) + +/* PIPE CTRL */ +#define P_EN BIT(1) +#define P_DIRECTION BIT(3) +#define P_SYS_STRM BIT(4) +#define P_SYS_MODE BIT(5) +#define P_AUTO_EOB BIT(6) +#define P_AUTO_EOB_SEL_SHIFT 7 +#define P_AUTO_EOB_SEL_512 (0 << P_AUTO_EOB_SEL_SHIFT) +#define P_AUTO_EOB_SEL_256 (1 << P_AUTO_EOB_SEL_SHIFT) +#define P_AUTO_EOB_SEL_128 (2 << P_AUTO_EOB_SEL_SHIFT) +#define P_AUTO_EOB_SEL_64 (3 << P_AUTO_EOB_SEL_SHIFT) +#define P_PREFETCH_LIMIT_SHIFT 9 +#define P_PREFETCH_LIMIT_32 (0 << P_PREFETCH_LIMIT_SHIFT) +#define P_PREFETCH_LIMIT_16 (1 << P_PREFETCH_LIMIT_SHIFT) +#define P_PREFETCH_LIMIT_4 (2 << P_PREFETCH_LIMIT_SHIFT) +#define P_WRITE_NWD BIT(11) +#define P_LOCK_GROUP_SHIFT 16 +#define P_LOCK_GROUP_MASK 0x1F + +/* BAM_DESC_CNT_TRSHLD */ +#define CNT_TRSHLD 0xffff +#define DEFAULT_CNT_THRSHLD 0x4 + +/* BAM_IRQ_SRCS */ +#define BAM_IRQ BIT(31) +#define P_IRQ 0x7fffffff + +/* BAM_IRQ_SRCS_MSK */ +#define BAM_IRQ_MSK BAM_IRQ +#define P_IRQ_MSK P_IRQ + +/* BAM_IRQ_STTS */ +#define BAM_TIMER_IRQ BIT(4) +#define BAM_EMPTY_IRQ BIT(3) +#define BAM_ERROR_IRQ BIT(2) +#define BAM_HRESP_ERR_IRQ BIT(1) + +/* BAM_IRQ_CLR */ +#define BAM_TIMER_CLR BIT(4) +#define BAM_EMPTY_CLR BIT(3) +#define BAM_ERROR_CLR BIT(2) +#define BAM_HRESP_ERR_CLR BIT(1) + +/* BAM_IRQ_EN */ +#define BAM_TIMER_EN BIT(4) +#define BAM_EMPTY_EN BIT(3) +#define BAM_ERROR_EN BIT(2) +#define BAM_HRESP_ERR_EN BIT(1) + +/* BAM_P_IRQ_EN */ +#define P_PRCSD_DESC_EN BIT(0) +#define P_TIMER_EN BIT(1) +#define P_WAKE_EN BIT(2) +#define P_OUT_OF_DESC_EN BIT(3) +#define P_ERR_EN BIT(4) +#define P_TRNSFR_END_EN BIT(5) +#define P_DEFAULT_IRQS_EN (P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN) + +/* BAM_P_SW_OFSTS */ +#define P_SW_OFSTS_MASK 0xffff + +#define BAM_DESC_FIFO_SIZE SZ_32K +#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1) +#define BAM_MAX_DATA_SIZE (SZ_32K - 8) + +struct bam_chan { + struct virt_dma_chan vc; + + struct bam_device *bdev; + + /* configuration from device tree */ + u32 id; + + struct bam_async_desc *curr_txd; /* current running dma */ + + /* runtime configuration */ + struct dma_slave_config slave; + + /* fifo storage */ + struct bam_desc_hw *fifo_virt; + dma_addr_t fifo_phys; + + /* fifo markers */ + unsigned short head; /* start of active descriptor entries */ + unsigned short tail; /* end of active descriptor entries */ + + unsigned int initialized; /* is the channel hw initialized? */ + unsigned int paused; /* is the channel paused? */ + unsigned int reconfigure; /* new slave config? 
*/ + + struct list_head node; +}; + +static inline struct bam_chan *to_bam_chan(struct dma_chan *common) +{ + return container_of(common, struct bam_chan, vc.chan); +} + +struct bam_device { + void __iomem *regs; + struct device *dev; + struct dma_device common; + struct device_dma_parameters dma_parms; + struct bam_chan *channels; + u32 num_channels; + + /* execution environment ID, from DT */ + u32 ee; + + const struct reg_offset_data *layout; + + struct clk *bamclk; + int irq; + + /* dma start transaction tasklet */ + struct tasklet_struct task; +}; + +/** + * bam_addr - returns BAM register address + * @bdev: bam device + * @pipe: pipe instance (ignored when register doesn't have multiple instances) + * @reg: register enum + */ +static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe, + enum bam_reg reg) +{ + const struct reg_offset_data r = bdev->layout[reg]; + + return bdev->regs + r.base_offset + + r.pipe_mult * pipe + + r.evnt_mult * pipe + + r.ee_mult * bdev->ee; +} + +/** + * bam_reset_channel - Reset individual BAM DMA channel + * @bchan: bam channel + * + * This function resets a specific BAM channel + */ +static void bam_reset_channel(struct bam_chan *bchan) +{ + struct bam_device *bdev = bchan->bdev; + + lockdep_assert_held(&bchan->vc.lock); + + /* reset channel */ + writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST)); + writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST)); + + /* don't allow cpu to reorder BAM register accesses done after this */ + wmb(); + + /* make sure hw is initialized when channel is used the first time */ + bchan->initialized = 0; +} + +/** + * bam_chan_init_hw - Initialize channel hardware + * @bchan: bam channel + * + * This function resets and initializes the BAM channel + */ +static void bam_chan_init_hw(struct bam_chan *bchan, + enum dma_transfer_direction dir) +{ + struct bam_device *bdev = bchan->bdev; + u32 val; + + /* Reset the channel to clear internal state of the FIFO */ + bam_reset_channel(bchan); + + /* + * write out 8 byte aligned address. We have enough space for this + * because we allocated 1 more descriptor (8 bytes) than we can use + */ + writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)), + bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR)); + writel_relaxed(BAM_DESC_FIFO_SIZE, + bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES)); + + /* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */ + writel_relaxed(P_DEFAULT_IRQS_EN, + bam_addr(bdev, bchan->id, BAM_P_IRQ_EN)); + + /* unmask the specific pipe and EE combo */ + val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); + val |= BIT(bchan->id); + writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); + + /* don't allow cpu to reorder the channel enable done below */ + wmb(); + + /* set fixed direction and mode, then enable channel */ + val = P_EN | P_SYS_MODE; + if (dir == DMA_DEV_TO_MEM) + val |= P_DIRECTION; + + writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL)); + + bchan->initialized = 1; + + /* init FIFO pointers */ + bchan->head = 0; + bchan->tail = 0; +} + +/** + * bam_alloc_chan - Allocate channel resources for DMA channel. 
+ * @chan: specified channel + * + * This function allocates the FIFO descriptor memory + */ +static int bam_alloc_chan(struct dma_chan *chan) +{ + struct bam_chan *bchan = to_bam_chan(chan); + struct bam_device *bdev = bchan->bdev; + + if (bchan->fifo_virt) + return 0; + + /* allocate FIFO descriptor space, but only if necessary */ + bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, + &bchan->fifo_phys, GFP_KERNEL); + + if (!bchan->fifo_virt) { + dev_err(bdev->dev, "Failed to allocate desc fifo\n"); + return -ENOMEM; + } + + return 0; +} + +/** + * bam_free_chan - Frees dma resources associated with specific channel + * @chan: specified channel + * + * Free the allocated fifo descriptor memory and channel resources + * + */ +static void bam_free_chan(struct dma_chan *chan) +{ + struct bam_chan *bchan = to_bam_chan(chan); + struct bam_device *bdev = bchan->bdev; + u32 val; + unsigned long flags; + + vchan_free_chan_resources(to_virt_chan(chan)); + + if (bchan->curr_txd) { + dev_err(bchan->bdev->dev, "Cannot free busy channel\n"); + return; + } + + spin_lock_irqsave(&bchan->vc.lock, flags); + bam_reset_channel(bchan); + spin_unlock_irqrestore(&bchan->vc.lock, flags); + + dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt, + bchan->fifo_phys); + bchan->fifo_virt = NULL; + + /* mask irq for pipe/channel */ + val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); + val &= ~BIT(bchan->id); + writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); + + /* disable irq */ + writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN)); +} + +/** + * bam_slave_config - set slave configuration for channel + * @chan: dma channel + * @cfg: slave configuration + * + * Sets slave configuration for channel + * + */ +static int bam_slave_config(struct dma_chan *chan, + struct dma_slave_config *cfg) +{ + struct bam_chan *bchan = to_bam_chan(chan); + unsigned long flag; + + spin_lock_irqsave(&bchan->vc.lock, flag); + memcpy(&bchan->slave, cfg, sizeof(*cfg)); + bchan->reconfigure = 1; + spin_unlock_irqrestore(&bchan->vc.lock, flag); + + return 0; +} + +/** + * bam_prep_slave_sg - Prep slave sg transaction + * + * @chan: dma channel + * @sgl: scatter gather list + * @sg_len: length of sg + * @direction: DMA transfer direction + * @flags: DMA flags + * @context: transfer context (unused) + */ +static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, + struct scatterlist *sgl, unsigned int sg_len, + enum dma_transfer_direction direction, unsigned long flags, + void *context) +{ + struct bam_chan *bchan = to_bam_chan(chan); + struct bam_device *bdev = bchan->bdev; + struct bam_async_desc *async_desc; + struct scatterlist *sg; + u32 i; + struct bam_desc_hw *desc; + unsigned int num_alloc = 0; + + + if (!is_slave_direction(direction)) { + dev_err(bdev->dev, "invalid dma direction\n"); + return NULL; + } + + /* calculate number of required entries */ + for_each_sg(sgl, sg, sg_len, i) + num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE); + + /* allocate enough room to accomodate the number of entries */ + async_desc = kzalloc(sizeof(*async_desc) + + (num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT); + + if (!async_desc) + goto err_out; + + if (flags & DMA_PREP_FENCE) + async_desc->flags |= DESC_FLAG_NWD; + + if (flags & DMA_PREP_INTERRUPT) + async_desc->flags |= DESC_FLAG_EOT; + else + async_desc->flags |= DESC_FLAG_INT; + + async_desc->num_desc = num_alloc; + async_desc->curr_desc = async_desc->desc; + async_desc->dir = direction; 
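	/*
	 * Each scatterlist entry below may expand into several hardware
	 * descriptors of at most BAM_MAX_DATA_SIZE bytes each, which is why
	 * num_alloc was computed with DIV_ROUND_UP above. The EOT/INT/NWD
	 * flags chosen above are not written here; bam_start_dma() applies
	 * them to the last descriptor when the transfer is pushed to the FIFO.
	 */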
+ + /* fill in temporary descriptors */ + desc = async_desc->desc; + for_each_sg(sgl, sg, sg_len, i) { + unsigned int remainder = sg_dma_len(sg); + unsigned int curr_offset = 0; + + do { + desc->addr = cpu_to_le32(sg_dma_address(sg) + + curr_offset); + + if (remainder > BAM_MAX_DATA_SIZE) { + desc->size = cpu_to_le16(BAM_MAX_DATA_SIZE); + remainder -= BAM_MAX_DATA_SIZE; + curr_offset += BAM_MAX_DATA_SIZE; + } else { + desc->size = cpu_to_le16(remainder); + remainder = 0; + } + + async_desc->length += desc->size; + desc++; + } while (remainder > 0); + } + + return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags); + +err_out: + kfree(async_desc); + return NULL; +} + +/** + * bam_dma_terminate_all - terminate all transactions on a channel + * @bchan: bam dma channel + * + * Dequeues and frees all transactions + * No callbacks are done + * + */ +static int bam_dma_terminate_all(struct dma_chan *chan) +{ + struct bam_chan *bchan = to_bam_chan(chan); + unsigned long flag; + LIST_HEAD(head); + + /* remove all transactions, including active transaction */ + spin_lock_irqsave(&bchan->vc.lock, flag); + if (bchan->curr_txd) { + list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued); + bchan->curr_txd = NULL; + } + + vchan_get_all_descriptors(&bchan->vc, &head); + spin_unlock_irqrestore(&bchan->vc.lock, flag); + + vchan_dma_desc_free_list(&bchan->vc, &head); + + return 0; +} + +/** + * bam_pause - Pause DMA channel + * @chan: dma channel + * + */ +static int bam_pause(struct dma_chan *chan) +{ + struct bam_chan *bchan = to_bam_chan(chan); + struct bam_device *bdev = bchan->bdev; + unsigned long flag; + + spin_lock_irqsave(&bchan->vc.lock, flag); + writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT)); + bchan->paused = 1; + spin_unlock_irqrestore(&bchan->vc.lock, flag); + + return 0; +} + +/** + * bam_resume - Resume DMA channel operations + * @chan: dma channel + * + */ +static int bam_resume(struct dma_chan *chan) +{ + struct bam_chan *bchan = to_bam_chan(chan); + struct bam_device *bdev = bchan->bdev; + unsigned long flag; + + spin_lock_irqsave(&bchan->vc.lock, flag); + writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT)); + bchan->paused = 0; + spin_unlock_irqrestore(&bchan->vc.lock, flag); + + return 0; +} + +/** + * process_channel_irqs - processes the channel interrupts + * @bdev: bam controller + * + * This function processes the channel interrupts + * + */ +static u32 process_channel_irqs(struct bam_device *bdev) +{ + u32 i, srcs, pipe_stts; + unsigned long flags; + struct bam_async_desc *async_desc; + + srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE)); + + /* return early if no pipe/channel interrupts are present */ + if (!(srcs & P_IRQ)) + return srcs; + + for (i = 0; i < bdev->num_channels; i++) { + struct bam_chan *bchan = &bdev->channels[i]; + + if (!(srcs & BIT(i))) + continue; + + /* clear pipe irq */ + pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS)); + + writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR)); + + spin_lock_irqsave(&bchan->vc.lock, flags); + async_desc = bchan->curr_txd; + + if (async_desc) { + async_desc->num_desc -= async_desc->xfer_len; + async_desc->curr_desc += async_desc->xfer_len; + bchan->curr_txd = NULL; + + /* manage FIFO */ + bchan->head += async_desc->xfer_len; + bchan->head %= MAX_DESCRIPTORS; + + /* + * if complete, process cookie. 
Otherwise + * push back to front of desc_issued so that + * it gets restarted by the tasklet + */ + if (!async_desc->num_desc) + vchan_cookie_complete(&async_desc->vd); + else + list_add(&async_desc->vd.node, + &bchan->vc.desc_issued); + } + + spin_unlock_irqrestore(&bchan->vc.lock, flags); + } + + return srcs; +} + +/** + * bam_dma_irq - irq handler for bam controller + * @irq: IRQ of interrupt + * @data: callback data + * + * IRQ handler for the bam controller + */ +static irqreturn_t bam_dma_irq(int irq, void *data) +{ + struct bam_device *bdev = data; + u32 clr_mask = 0, srcs = 0; + + srcs |= process_channel_irqs(bdev); + + /* kick off tasklet to start next dma transfer */ + if (srcs & P_IRQ) + tasklet_schedule(&bdev->task); + + if (srcs & BAM_IRQ) + clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS)); + + /* don't allow reorder of the various accesses to the BAM registers */ + mb(); + + writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR)); + + return IRQ_HANDLED; +} + +/** + * bam_tx_status - returns status of transaction + * @chan: dma channel + * @cookie: transaction cookie + * @txstate: DMA transaction state + * + * Return status of dma transaction + */ +static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct bam_chan *bchan = to_bam_chan(chan); + struct virt_dma_desc *vd; + int ret; + size_t residue = 0; + unsigned int i; + unsigned long flags; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + if (!txstate) + return bchan->paused ? DMA_PAUSED : ret; + + spin_lock_irqsave(&bchan->vc.lock, flags); + vd = vchan_find_desc(&bchan->vc, cookie); + if (vd) + residue = container_of(vd, struct bam_async_desc, vd)->length; + else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie) + for (i = 0; i < bchan->curr_txd->num_desc; i++) + residue += bchan->curr_txd->curr_desc[i].size; + + spin_unlock_irqrestore(&bchan->vc.lock, flags); + + dma_set_residue(txstate, residue); + + if (ret == DMA_IN_PROGRESS && bchan->paused) + ret = DMA_PAUSED; + + return ret; +} + +/** + * bam_apply_new_config + * @bchan: bam dma channel + * @dir: DMA direction + */ +static void bam_apply_new_config(struct bam_chan *bchan, + enum dma_transfer_direction dir) +{ + struct bam_device *bdev = bchan->bdev; + u32 maxburst; + + if (dir == DMA_DEV_TO_MEM) + maxburst = bchan->slave.src_maxburst; + else + maxburst = bchan->slave.dst_maxburst; + + writel_relaxed(maxburst, bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); + + bchan->reconfigure = 0; +} + +/** + * bam_start_dma - start next transaction + * @bchan - bam dma channel + */ +static void bam_start_dma(struct bam_chan *bchan) +{ + struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc); + struct bam_device *bdev = bchan->bdev; + struct bam_async_desc *async_desc; + struct bam_desc_hw *desc; + struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt, + sizeof(struct bam_desc_hw)); + + lockdep_assert_held(&bchan->vc.lock); + + if (!vd) + return; + + list_del(&vd->node); + + async_desc = container_of(vd, struct bam_async_desc, vd); + bchan->curr_txd = async_desc; + + /* on first use, initialize the channel hardware */ + if (!bchan->initialized) + bam_chan_init_hw(bchan, async_desc->dir); + + /* apply new slave config changes, if necessary */ + if (bchan->reconfigure) + bam_apply_new_config(bchan, async_desc->dir); + + desc = bchan->curr_txd->curr_desc; + + if (async_desc->num_desc > MAX_DESCRIPTORS) + async_desc->xfer_len = MAX_DESCRIPTORS; + else + 
async_desc->xfer_len = async_desc->num_desc; + + /* set any special flags on the last descriptor */ + if (async_desc->num_desc == async_desc->xfer_len) + desc[async_desc->xfer_len - 1].flags = + cpu_to_le16(async_desc->flags); + else + desc[async_desc->xfer_len - 1].flags |= + cpu_to_le16(DESC_FLAG_INT); + + if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) { + u32 partial = MAX_DESCRIPTORS - bchan->tail; + + memcpy(&fifo[bchan->tail], desc, + partial * sizeof(struct bam_desc_hw)); + memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) * + sizeof(struct bam_desc_hw)); + } else { + memcpy(&fifo[bchan->tail], desc, + async_desc->xfer_len * sizeof(struct bam_desc_hw)); + } + + bchan->tail += async_desc->xfer_len; + bchan->tail %= MAX_DESCRIPTORS; + + /* ensure descriptor writes and dma start not reordered */ + wmb(); + writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw), + bam_addr(bdev, bchan->id, BAM_P_EVNT_REG)); +} + +/** + * dma_tasklet - DMA IRQ tasklet + * @data: tasklet argument (bam controller structure) + * + * Sets up next DMA operation and then processes all completed transactions + */ +static void dma_tasklet(unsigned long data) +{ + struct bam_device *bdev = (struct bam_device *)data; + struct bam_chan *bchan; + unsigned long flags; + unsigned int i; + + /* go through the channels and kick off transactions */ + for (i = 0; i < bdev->num_channels; i++) { + bchan = &bdev->channels[i]; + spin_lock_irqsave(&bchan->vc.lock, flags); + + if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd) + bam_start_dma(bchan); + spin_unlock_irqrestore(&bchan->vc.lock, flags); + } +} + +/** + * bam_issue_pending - starts pending transactions + * @chan: dma channel + * + * Calls tasklet directly which in turn starts any pending transactions + */ +static void bam_issue_pending(struct dma_chan *chan) +{ + struct bam_chan *bchan = to_bam_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&bchan->vc.lock, flags); + + /* if work pending and idle, start a transaction */ + if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd) + bam_start_dma(bchan); + + spin_unlock_irqrestore(&bchan->vc.lock, flags); +} + +/** + * bam_dma_free_desc - free descriptor memory + * @vd: virtual descriptor + * + */ +static void bam_dma_free_desc(struct virt_dma_desc *vd) +{ + struct bam_async_desc *async_desc = container_of(vd, + struct bam_async_desc, vd); + + kfree(async_desc); +} + +static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec, + struct of_dma *of) +{ + struct bam_device *bdev = container_of(of->of_dma_data, + struct bam_device, common); + unsigned int request; + + if (dma_spec->args_count != 1) + return NULL; + + request = dma_spec->args[0]; + if (request >= bdev->num_channels) + return NULL; + + return dma_get_slave_channel(&(bdev->channels[request].vc.chan)); +} + +/** + * bam_init + * @bdev: bam device + * + * Initialization helper for global bam registers + */ +static int bam_init(struct bam_device *bdev) +{ + u32 val; + + /* read revision and configuration information */ + val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT; + val &= NUM_EES_MASK; + + /* check that configured EE is within range */ + if (bdev->ee >= val) + return -EINVAL; + + val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES)); + bdev->num_channels = val & BAM_NUM_PIPES_MASK; + + /* s/w reset bam */ + /* after reset all pipes are disabled and idle */ + val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL)); + val |= BAM_SW_RST; + writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); 
+ val &= ~BAM_SW_RST; + writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); + + /* make sure previous stores are visible before enabling BAM */ + wmb(); + + /* enable bam */ + val |= BAM_EN; + writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); + + /* set descriptor threshhold, start with 4 bytes */ + writel_relaxed(DEFAULT_CNT_THRSHLD, + bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); + + /* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */ + writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS)); + + /* enable irqs for errors */ + writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN, + bam_addr(bdev, 0, BAM_IRQ_EN)); + + /* unmask global bam interrupt */ + writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); + + return 0; +} + +static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan, + u32 index) +{ + bchan->id = index; + bchan->bdev = bdev; + + vchan_init(&bchan->vc, &bdev->common); + bchan->vc.desc_free = bam_dma_free_desc; +} + +static const struct of_device_id bam_of_match[] = { + { .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info }, + { .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info }, + { .compatible = "qcom,bam-v1.7.0", .data = &bam_v1_7_reg_info }, + {} +}; + +MODULE_DEVICE_TABLE(of, bam_of_match); + +static int bam_dma_probe(struct platform_device *pdev) +{ + struct bam_device *bdev; + const struct of_device_id *match; + struct resource *iores; + int ret, i; + + bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL); + if (!bdev) + return -ENOMEM; + + bdev->dev = &pdev->dev; + + match = of_match_node(bam_of_match, pdev->dev.of_node); + if (!match) { + dev_err(&pdev->dev, "Unsupported BAM module\n"); + return -ENODEV; + } + + bdev->layout = match->data; + + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); + bdev->regs = devm_ioremap_resource(&pdev->dev, iores); + if (IS_ERR(bdev->regs)) + return PTR_ERR(bdev->regs); + + bdev->irq = platform_get_irq(pdev, 0); + if (bdev->irq < 0) + return bdev->irq; + + ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee); + if (ret) { + dev_err(bdev->dev, "Execution environment unspecified\n"); + return ret; + } + + bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk"); + if (IS_ERR(bdev->bamclk)) + return PTR_ERR(bdev->bamclk); + + ret = clk_prepare_enable(bdev->bamclk); + if (ret) { + dev_err(bdev->dev, "failed to prepare/enable clock\n"); + return ret; + } + + ret = bam_init(bdev); + if (ret) + goto err_disable_clk; + + tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev); + + bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels, + sizeof(*bdev->channels), GFP_KERNEL); + + if (!bdev->channels) { + ret = -ENOMEM; + goto err_tasklet_kill; + } + + /* allocate and initialize channels */ + INIT_LIST_HEAD(&bdev->common.channels); + + for (i = 0; i < bdev->num_channels; i++) + bam_channel_init(bdev, &bdev->channels[i], i); + + ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq, + IRQF_TRIGGER_HIGH, "bam_dma", bdev); + if (ret) + goto err_bam_channel_exit; + + /* set max dma segment size */ + bdev->common.dev = bdev->dev; + bdev->common.dev->dma_parms = &bdev->dma_parms; + ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE); + if (ret) { + dev_err(bdev->dev, "cannot set maximum segment size\n"); + goto err_bam_channel_exit; + } + + platform_set_drvdata(pdev, bdev); + + /* set capabilities */ + dma_cap_zero(bdev->common.cap_mask); + dma_cap_set(DMA_SLAVE, bdev->common.cap_mask); + + /* initialize dmaengine apis */ + 
bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; + bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES; + bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES; + bdev->common.device_alloc_chan_resources = bam_alloc_chan; + bdev->common.device_free_chan_resources = bam_free_chan; + bdev->common.device_prep_slave_sg = bam_prep_slave_sg; + bdev->common.device_config = bam_slave_config; + bdev->common.device_pause = bam_pause; + bdev->common.device_resume = bam_resume; + bdev->common.device_terminate_all = bam_dma_terminate_all; + bdev->common.device_issue_pending = bam_issue_pending; + bdev->common.device_tx_status = bam_tx_status; + bdev->common.dev = bdev->dev; + + ret = dma_async_device_register(&bdev->common); + if (ret) { + dev_err(bdev->dev, "failed to register dma async device\n"); + goto err_bam_channel_exit; + } + + ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate, + &bdev->common); + if (ret) + goto err_unregister_dma; + + return 0; + +err_unregister_dma: + dma_async_device_unregister(&bdev->common); +err_bam_channel_exit: + for (i = 0; i < bdev->num_channels; i++) + tasklet_kill(&bdev->channels[i].vc.task); +err_tasklet_kill: + tasklet_kill(&bdev->task); +err_disable_clk: + clk_disable_unprepare(bdev->bamclk); + + return ret; +} + +static int bam_dma_remove(struct platform_device *pdev) +{ + struct bam_device *bdev = platform_get_drvdata(pdev); + u32 i; + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&bdev->common); + + /* mask all interrupts for this execution environment */ + writel_relaxed(0, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); + + devm_free_irq(bdev->dev, bdev->irq, bdev); + + for (i = 0; i < bdev->num_channels; i++) { + bam_dma_terminate_all(&bdev->channels[i].vc.chan); + tasklet_kill(&bdev->channels[i].vc.task); + + dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, + bdev->channels[i].fifo_virt, + bdev->channels[i].fifo_phys); + } + + tasklet_kill(&bdev->task); + + clk_disable_unprepare(bdev->bamclk); + + return 0; +} + +static struct platform_driver bam_dma_driver = { + .probe = bam_dma_probe, + .remove = bam_dma_remove, + .driver = { + .name = "bam-dma-engine", + .of_match_table = bam_of_match, + }, +}; + +module_platform_driver(bam_dma_driver); + +MODULE_AUTHOR("Andy Gross "); +MODULE_DESCRIPTION("QCOM BAM DMA engine driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c deleted file mode 100644 index 37f7aec..0000000 --- a/drivers/dma/qcom_bam_dma.c +++ /dev/null @@ -1,1262 +0,0 @@ -/* - * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ -/* - * QCOM BAM DMA engine driver - * - * QCOM BAM DMA blocks are distributed amongst a number of the on-chip - * peripherals on the MSM 8x74. The configuration of the channels are dependent - * on the way they are hard wired to that specific peripheral. The peripheral - * device tree entries specify the configuration of each channel. 
- * - * The DMA controller requires the use of external memory for storage of the - * hardware descriptors for each channel. The descriptor FIFO is accessed as a - * circular buffer and operations are managed according to the offset within the - * FIFO. After pipe/channel reset, all of the pipe registers and internal state - * are back to defaults. - * - * During DMA operations, we write descriptors to the FIFO, being careful to - * handle wrapping and then write the last FIFO offset to that channel's - * P_EVNT_REG register to kick off the transaction. The P_SW_OFSTS register - * indicates the current FIFO offset that is being processed, so there is some - * indication of where the hardware is currently working. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "dmaengine.h" -#include "virt-dma.h" - -struct bam_desc_hw { - __le32 addr; /* Buffer physical address */ - __le16 size; /* Buffer size in bytes */ - __le16 flags; -}; - -#define DESC_FLAG_INT BIT(15) -#define DESC_FLAG_EOT BIT(14) -#define DESC_FLAG_EOB BIT(13) -#define DESC_FLAG_NWD BIT(12) - -struct bam_async_desc { - struct virt_dma_desc vd; - - u32 num_desc; - u32 xfer_len; - - /* transaction flags, EOT|EOB|NWD */ - u16 flags; - - struct bam_desc_hw *curr_desc; - - enum dma_transfer_direction dir; - size_t length; - struct bam_desc_hw desc[0]; -}; - -enum bam_reg { - BAM_CTRL, - BAM_REVISION, - BAM_NUM_PIPES, - BAM_DESC_CNT_TRSHLD, - BAM_IRQ_SRCS, - BAM_IRQ_SRCS_MSK, - BAM_IRQ_SRCS_UNMASKED, - BAM_IRQ_STTS, - BAM_IRQ_CLR, - BAM_IRQ_EN, - BAM_CNFG_BITS, - BAM_IRQ_SRCS_EE, - BAM_IRQ_SRCS_MSK_EE, - BAM_P_CTRL, - BAM_P_RST, - BAM_P_HALT, - BAM_P_IRQ_STTS, - BAM_P_IRQ_CLR, - BAM_P_IRQ_EN, - BAM_P_EVNT_DEST_ADDR, - BAM_P_EVNT_REG, - BAM_P_SW_OFSTS, - BAM_P_DATA_FIFO_ADDR, - BAM_P_DESC_FIFO_ADDR, - BAM_P_EVNT_GEN_TRSHLD, - BAM_P_FIFO_SIZES, -}; - -struct reg_offset_data { - u32 base_offset; - unsigned int pipe_mult, evnt_mult, ee_mult; -}; - -static const struct reg_offset_data bam_v1_3_reg_info[] = { - [BAM_CTRL] = { 0x0F80, 0x00, 0x00, 0x00 }, - [BAM_REVISION] = { 0x0F84, 0x00, 0x00, 0x00 }, - [BAM_NUM_PIPES] = { 0x0FBC, 0x00, 0x00, 0x00 }, - [BAM_DESC_CNT_TRSHLD] = { 0x0F88, 0x00, 0x00, 0x00 }, - [BAM_IRQ_SRCS] = { 0x0F8C, 0x00, 0x00, 0x00 }, - [BAM_IRQ_SRCS_MSK] = { 0x0F90, 0x00, 0x00, 0x00 }, - [BAM_IRQ_SRCS_UNMASKED] = { 0x0FB0, 0x00, 0x00, 0x00 }, - [BAM_IRQ_STTS] = { 0x0F94, 0x00, 0x00, 0x00 }, - [BAM_IRQ_CLR] = { 0x0F98, 0x00, 0x00, 0x00 }, - [BAM_IRQ_EN] = { 0x0F9C, 0x00, 0x00, 0x00 }, - [BAM_CNFG_BITS] = { 0x0FFC, 0x00, 0x00, 0x00 }, - [BAM_IRQ_SRCS_EE] = { 0x1800, 0x00, 0x00, 0x80 }, - [BAM_IRQ_SRCS_MSK_EE] = { 0x1804, 0x00, 0x00, 0x80 }, - [BAM_P_CTRL] = { 0x0000, 0x80, 0x00, 0x00 }, - [BAM_P_RST] = { 0x0004, 0x80, 0x00, 0x00 }, - [BAM_P_HALT] = { 0x0008, 0x80, 0x00, 0x00 }, - [BAM_P_IRQ_STTS] = { 0x0010, 0x80, 0x00, 0x00 }, - [BAM_P_IRQ_CLR] = { 0x0014, 0x80, 0x00, 0x00 }, - [BAM_P_IRQ_EN] = { 0x0018, 0x80, 0x00, 0x00 }, - [BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x40, 0x00 }, - [BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x40, 0x00 }, - [BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x40, 0x00 }, - [BAM_P_DATA_FIFO_ADDR] = { 0x1024, 0x00, 0x40, 0x00 }, - [BAM_P_DESC_FIFO_ADDR] = { 0x101C, 0x00, 0x40, 0x00 }, - [BAM_P_EVNT_GEN_TRSHLD] = { 0x1028, 0x00, 0x40, 0x00 }, - [BAM_P_FIFO_SIZES] = { 0x1020, 0x00, 0x40, 0x00 }, -}; - -static const struct reg_offset_data bam_v1_4_reg_info[] = { - [BAM_CTRL] = { 0x0000, 0x00, 
0x00, 0x00 }, - [BAM_REVISION] = { 0x0004, 0x00, 0x00, 0x00 }, - [BAM_NUM_PIPES] = { 0x003C, 0x00, 0x00, 0x00 }, - [BAM_DESC_CNT_TRSHLD] = { 0x0008, 0x00, 0x00, 0x00 }, - [BAM_IRQ_SRCS] = { 0x000C, 0x00, 0x00, 0x00 }, - [BAM_IRQ_SRCS_MSK] = { 0x0010, 0x00, 0x00, 0x00 }, - [BAM_IRQ_SRCS_UNMASKED] = { 0x0030, 0x00, 0x00, 0x00 }, - [BAM_IRQ_STTS] = { 0x0014, 0x00, 0x00, 0x00 }, - [BAM_IRQ_CLR] = { 0x0018, 0x00, 0x00, 0x00 }, - [BAM_IRQ_EN] = { 0x001C, 0x00, 0x00, 0x00 }, - [BAM_CNFG_BITS] = { 0x007C, 0x00, 0x00, 0x00 }, - [BAM_IRQ_SRCS_EE] = { 0x0800, 0x00, 0x00, 0x80 }, - [BAM_IRQ_SRCS_MSK_EE] = { 0x0804, 0x00, 0x00, 0x80 }, - [BAM_P_CTRL] = { 0x1000, 0x1000, 0x00, 0x00 }, - [BAM_P_RST] = { 0x1004, 0x1000, 0x00, 0x00 }, - [BAM_P_HALT] = { 0x1008, 0x1000, 0x00, 0x00 }, - [BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 }, - [BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 }, - [BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 }, - [BAM_P_EVNT_DEST_ADDR] = { 0x182C, 0x00, 0x1000, 0x00 }, - [BAM_P_EVNT_REG] = { 0x1818, 0x00, 0x1000, 0x00 }, - [BAM_P_SW_OFSTS] = { 0x1800, 0x00, 0x1000, 0x00 }, - [BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 }, - [BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 }, - [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 }, - [BAM_P_FIFO_SIZES] = { 0x1820, 0x00, 0x1000, 0x00 }, -}; - -static const struct reg_offset_data bam_v1_7_reg_info[] = { - [BAM_CTRL] = { 0x00000, 0x00, 0x00, 0x00 }, - [BAM_REVISION] = { 0x01000, 0x00, 0x00, 0x00 }, - [BAM_NUM_PIPES] = { 0x01008, 0x00, 0x00, 0x00 }, - [BAM_DESC_CNT_TRSHLD] = { 0x00008, 0x00, 0x00, 0x00 }, - [BAM_IRQ_SRCS] = { 0x03010, 0x00, 0x00, 0x00 }, - [BAM_IRQ_SRCS_MSK] = { 0x03014, 0x00, 0x00, 0x00 }, - [BAM_IRQ_SRCS_UNMASKED] = { 0x03018, 0x00, 0x00, 0x00 }, - [BAM_IRQ_STTS] = { 0x00014, 0x00, 0x00, 0x00 }, - [BAM_IRQ_CLR] = { 0x00018, 0x00, 0x00, 0x00 }, - [BAM_IRQ_EN] = { 0x0001C, 0x00, 0x00, 0x00 }, - [BAM_CNFG_BITS] = { 0x0007C, 0x00, 0x00, 0x00 }, - [BAM_IRQ_SRCS_EE] = { 0x03000, 0x00, 0x00, 0x1000 }, - [BAM_IRQ_SRCS_MSK_EE] = { 0x03004, 0x00, 0x00, 0x1000 }, - [BAM_P_CTRL] = { 0x13000, 0x1000, 0x00, 0x00 }, - [BAM_P_RST] = { 0x13004, 0x1000, 0x00, 0x00 }, - [BAM_P_HALT] = { 0x13008, 0x1000, 0x00, 0x00 }, - [BAM_P_IRQ_STTS] = { 0x13010, 0x1000, 0x00, 0x00 }, - [BAM_P_IRQ_CLR] = { 0x13014, 0x1000, 0x00, 0x00 }, - [BAM_P_IRQ_EN] = { 0x13018, 0x1000, 0x00, 0x00 }, - [BAM_P_EVNT_DEST_ADDR] = { 0x1382C, 0x00, 0x1000, 0x00 }, - [BAM_P_EVNT_REG] = { 0x13818, 0x00, 0x1000, 0x00 }, - [BAM_P_SW_OFSTS] = { 0x13800, 0x00, 0x1000, 0x00 }, - [BAM_P_DATA_FIFO_ADDR] = { 0x13824, 0x00, 0x1000, 0x00 }, - [BAM_P_DESC_FIFO_ADDR] = { 0x1381C, 0x00, 0x1000, 0x00 }, - [BAM_P_EVNT_GEN_TRSHLD] = { 0x13828, 0x00, 0x1000, 0x00 }, - [BAM_P_FIFO_SIZES] = { 0x13820, 0x00, 0x1000, 0x00 }, -}; - -/* BAM CTRL */ -#define BAM_SW_RST BIT(0) -#define BAM_EN BIT(1) -#define BAM_EN_ACCUM BIT(4) -#define BAM_TESTBUS_SEL_SHIFT 5 -#define BAM_TESTBUS_SEL_MASK 0x3F -#define BAM_DESC_CACHE_SEL_SHIFT 13 -#define BAM_DESC_CACHE_SEL_MASK 0x3 -#define BAM_CACHED_DESC_STORE BIT(15) -#define IBC_DISABLE BIT(16) - -/* BAM REVISION */ -#define REVISION_SHIFT 0 -#define REVISION_MASK 0xFF -#define NUM_EES_SHIFT 8 -#define NUM_EES_MASK 0xF -#define CE_BUFFER_SIZE BIT(13) -#define AXI_ACTIVE BIT(14) -#define USE_VMIDMT BIT(15) -#define SECURED BIT(16) -#define BAM_HAS_NO_BYPASS BIT(17) -#define HIGH_FREQUENCY_BAM BIT(18) -#define INACTIV_TMRS_EXST BIT(19) -#define NUM_INACTIV_TMRS BIT(20) -#define DESC_CACHE_DEPTH_SHIFT 21 -#define DESC_CACHE_DEPTH_1 (0 << 
DESC_CACHE_DEPTH_SHIFT) -#define DESC_CACHE_DEPTH_2 (1 << DESC_CACHE_DEPTH_SHIFT) -#define DESC_CACHE_DEPTH_3 (2 << DESC_CACHE_DEPTH_SHIFT) -#define DESC_CACHE_DEPTH_4 (3 << DESC_CACHE_DEPTH_SHIFT) -#define CMD_DESC_EN BIT(23) -#define INACTIV_TMR_BASE_SHIFT 24 -#define INACTIV_TMR_BASE_MASK 0xFF - -/* BAM NUM PIPES */ -#define BAM_NUM_PIPES_SHIFT 0 -#define BAM_NUM_PIPES_MASK 0xFF -#define PERIPH_NON_PIPE_GRP_SHIFT 16 -#define PERIPH_NON_PIP_GRP_MASK 0xFF -#define BAM_NON_PIPE_GRP_SHIFT 24 -#define BAM_NON_PIPE_GRP_MASK 0xFF - -/* BAM CNFG BITS */ -#define BAM_PIPE_CNFG BIT(2) -#define BAM_FULL_PIPE BIT(11) -#define BAM_NO_EXT_P_RST BIT(12) -#define BAM_IBC_DISABLE BIT(13) -#define BAM_SB_CLK_REQ BIT(14) -#define BAM_PSM_CSW_REQ BIT(15) -#define BAM_PSM_P_RES BIT(16) -#define BAM_AU_P_RES BIT(17) -#define BAM_SI_P_RES BIT(18) -#define BAM_WB_P_RES BIT(19) -#define BAM_WB_BLK_CSW BIT(20) -#define BAM_WB_CSW_ACK_IDL BIT(21) -#define BAM_WB_RETR_SVPNT BIT(22) -#define BAM_WB_DSC_AVL_P_RST BIT(23) -#define BAM_REG_P_EN BIT(24) -#define BAM_PSM_P_HD_DATA BIT(25) -#define BAM_AU_ACCUMED BIT(26) -#define BAM_CMD_ENABLE BIT(27) - -#define BAM_CNFG_BITS_DEFAULT (BAM_PIPE_CNFG | \ - BAM_NO_EXT_P_RST | \ - BAM_IBC_DISABLE | \ - BAM_SB_CLK_REQ | \ - BAM_PSM_CSW_REQ | \ - BAM_PSM_P_RES | \ - BAM_AU_P_RES | \ - BAM_SI_P_RES | \ - BAM_WB_P_RES | \ - BAM_WB_BLK_CSW | \ - BAM_WB_CSW_ACK_IDL | \ - BAM_WB_RETR_SVPNT | \ - BAM_WB_DSC_AVL_P_RST | \ - BAM_REG_P_EN | \ - BAM_PSM_P_HD_DATA | \ - BAM_AU_ACCUMED | \ - BAM_CMD_ENABLE) - -/* PIPE CTRL */ -#define P_EN BIT(1) -#define P_DIRECTION BIT(3) -#define P_SYS_STRM BIT(4) -#define P_SYS_MODE BIT(5) -#define P_AUTO_EOB BIT(6) -#define P_AUTO_EOB_SEL_SHIFT 7 -#define P_AUTO_EOB_SEL_512 (0 << P_AUTO_EOB_SEL_SHIFT) -#define P_AUTO_EOB_SEL_256 (1 << P_AUTO_EOB_SEL_SHIFT) -#define P_AUTO_EOB_SEL_128 (2 << P_AUTO_EOB_SEL_SHIFT) -#define P_AUTO_EOB_SEL_64 (3 << P_AUTO_EOB_SEL_SHIFT) -#define P_PREFETCH_LIMIT_SHIFT 9 -#define P_PREFETCH_LIMIT_32 (0 << P_PREFETCH_LIMIT_SHIFT) -#define P_PREFETCH_LIMIT_16 (1 << P_PREFETCH_LIMIT_SHIFT) -#define P_PREFETCH_LIMIT_4 (2 << P_PREFETCH_LIMIT_SHIFT) -#define P_WRITE_NWD BIT(11) -#define P_LOCK_GROUP_SHIFT 16 -#define P_LOCK_GROUP_MASK 0x1F - -/* BAM_DESC_CNT_TRSHLD */ -#define CNT_TRSHLD 0xffff -#define DEFAULT_CNT_THRSHLD 0x4 - -/* BAM_IRQ_SRCS */ -#define BAM_IRQ BIT(31) -#define P_IRQ 0x7fffffff - -/* BAM_IRQ_SRCS_MSK */ -#define BAM_IRQ_MSK BAM_IRQ -#define P_IRQ_MSK P_IRQ - -/* BAM_IRQ_STTS */ -#define BAM_TIMER_IRQ BIT(4) -#define BAM_EMPTY_IRQ BIT(3) -#define BAM_ERROR_IRQ BIT(2) -#define BAM_HRESP_ERR_IRQ BIT(1) - -/* BAM_IRQ_CLR */ -#define BAM_TIMER_CLR BIT(4) -#define BAM_EMPTY_CLR BIT(3) -#define BAM_ERROR_CLR BIT(2) -#define BAM_HRESP_ERR_CLR BIT(1) - -/* BAM_IRQ_EN */ -#define BAM_TIMER_EN BIT(4) -#define BAM_EMPTY_EN BIT(3) -#define BAM_ERROR_EN BIT(2) -#define BAM_HRESP_ERR_EN BIT(1) - -/* BAM_P_IRQ_EN */ -#define P_PRCSD_DESC_EN BIT(0) -#define P_TIMER_EN BIT(1) -#define P_WAKE_EN BIT(2) -#define P_OUT_OF_DESC_EN BIT(3) -#define P_ERR_EN BIT(4) -#define P_TRNSFR_END_EN BIT(5) -#define P_DEFAULT_IRQS_EN (P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN) - -/* BAM_P_SW_OFSTS */ -#define P_SW_OFSTS_MASK 0xffff - -#define BAM_DESC_FIFO_SIZE SZ_32K -#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1) -#define BAM_MAX_DATA_SIZE (SZ_32K - 8) - -struct bam_chan { - struct virt_dma_chan vc; - - struct bam_device *bdev; - - /* configuration from device tree */ - u32 id; - - struct bam_async_desc 
*curr_txd; /* current running dma */ - - /* runtime configuration */ - struct dma_slave_config slave; - - /* fifo storage */ - struct bam_desc_hw *fifo_virt; - dma_addr_t fifo_phys; - - /* fifo markers */ - unsigned short head; /* start of active descriptor entries */ - unsigned short tail; /* end of active descriptor entries */ - - unsigned int initialized; /* is the channel hw initialized? */ - unsigned int paused; /* is the channel paused? */ - unsigned int reconfigure; /* new slave config? */ - - struct list_head node; -}; - -static inline struct bam_chan *to_bam_chan(struct dma_chan *common) -{ - return container_of(common, struct bam_chan, vc.chan); -} - -struct bam_device { - void __iomem *regs; - struct device *dev; - struct dma_device common; - struct device_dma_parameters dma_parms; - struct bam_chan *channels; - u32 num_channels; - - /* execution environment ID, from DT */ - u32 ee; - - const struct reg_offset_data *layout; - - struct clk *bamclk; - int irq; - - /* dma start transaction tasklet */ - struct tasklet_struct task; -}; - -/** - * bam_addr - returns BAM register address - * @bdev: bam device - * @pipe: pipe instance (ignored when register doesn't have multiple instances) - * @reg: register enum - */ -static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe, - enum bam_reg reg) -{ - const struct reg_offset_data r = bdev->layout[reg]; - - return bdev->regs + r.base_offset + - r.pipe_mult * pipe + - r.evnt_mult * pipe + - r.ee_mult * bdev->ee; -} - -/** - * bam_reset_channel - Reset individual BAM DMA channel - * @bchan: bam channel - * - * This function resets a specific BAM channel - */ -static void bam_reset_channel(struct bam_chan *bchan) -{ - struct bam_device *bdev = bchan->bdev; - - lockdep_assert_held(&bchan->vc.lock); - - /* reset channel */ - writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST)); - writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST)); - - /* don't allow cpu to reorder BAM register accesses done after this */ - wmb(); - - /* make sure hw is initialized when channel is used the first time */ - bchan->initialized = 0; -} - -/** - * bam_chan_init_hw - Initialize channel hardware - * @bchan: bam channel - * - * This function resets and initializes the BAM channel - */ -static void bam_chan_init_hw(struct bam_chan *bchan, - enum dma_transfer_direction dir) -{ - struct bam_device *bdev = bchan->bdev; - u32 val; - - /* Reset the channel to clear internal state of the FIFO */ - bam_reset_channel(bchan); - - /* - * write out 8 byte aligned address. 
We have enough space for this - * because we allocated 1 more descriptor (8 bytes) than we can use - */ - writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)), - bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR)); - writel_relaxed(BAM_DESC_FIFO_SIZE, - bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES)); - - /* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */ - writel_relaxed(P_DEFAULT_IRQS_EN, - bam_addr(bdev, bchan->id, BAM_P_IRQ_EN)); - - /* unmask the specific pipe and EE combo */ - val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); - val |= BIT(bchan->id); - writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); - - /* don't allow cpu to reorder the channel enable done below */ - wmb(); - - /* set fixed direction and mode, then enable channel */ - val = P_EN | P_SYS_MODE; - if (dir == DMA_DEV_TO_MEM) - val |= P_DIRECTION; - - writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL)); - - bchan->initialized = 1; - - /* init FIFO pointers */ - bchan->head = 0; - bchan->tail = 0; -} - -/** - * bam_alloc_chan - Allocate channel resources for DMA channel. - * @chan: specified channel - * - * This function allocates the FIFO descriptor memory - */ -static int bam_alloc_chan(struct dma_chan *chan) -{ - struct bam_chan *bchan = to_bam_chan(chan); - struct bam_device *bdev = bchan->bdev; - - if (bchan->fifo_virt) - return 0; - - /* allocate FIFO descriptor space, but only if necessary */ - bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, - &bchan->fifo_phys, GFP_KERNEL); - - if (!bchan->fifo_virt) { - dev_err(bdev->dev, "Failed to allocate desc fifo\n"); - return -ENOMEM; - } - - return 0; -} - -/** - * bam_free_chan - Frees dma resources associated with specific channel - * @chan: specified channel - * - * Free the allocated fifo descriptor memory and channel resources - * - */ -static void bam_free_chan(struct dma_chan *chan) -{ - struct bam_chan *bchan = to_bam_chan(chan); - struct bam_device *bdev = bchan->bdev; - u32 val; - unsigned long flags; - - vchan_free_chan_resources(to_virt_chan(chan)); - - if (bchan->curr_txd) { - dev_err(bchan->bdev->dev, "Cannot free busy channel\n"); - return; - } - - spin_lock_irqsave(&bchan->vc.lock, flags); - bam_reset_channel(bchan); - spin_unlock_irqrestore(&bchan->vc.lock, flags); - - dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt, - bchan->fifo_phys); - bchan->fifo_virt = NULL; - - /* mask irq for pipe/channel */ - val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); - val &= ~BIT(bchan->id); - writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); - - /* disable irq */ - writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN)); -} - -/** - * bam_slave_config - set slave configuration for channel - * @chan: dma channel - * @cfg: slave configuration - * - * Sets slave configuration for channel - * - */ -static int bam_slave_config(struct dma_chan *chan, - struct dma_slave_config *cfg) -{ - struct bam_chan *bchan = to_bam_chan(chan); - unsigned long flag; - - spin_lock_irqsave(&bchan->vc.lock, flag); - memcpy(&bchan->slave, cfg, sizeof(*cfg)); - bchan->reconfigure = 1; - spin_unlock_irqrestore(&bchan->vc.lock, flag); - - return 0; -} - -/** - * bam_prep_slave_sg - Prep slave sg transaction - * - * @chan: dma channel - * @sgl: scatter gather list - * @sg_len: length of sg - * @direction: DMA transfer direction - * @flags: DMA flags - * @context: transfer context (unused) - */ -static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, - 
struct scatterlist *sgl, unsigned int sg_len, - enum dma_transfer_direction direction, unsigned long flags, - void *context) -{ - struct bam_chan *bchan = to_bam_chan(chan); - struct bam_device *bdev = bchan->bdev; - struct bam_async_desc *async_desc; - struct scatterlist *sg; - u32 i; - struct bam_desc_hw *desc; - unsigned int num_alloc = 0; - - - if (!is_slave_direction(direction)) { - dev_err(bdev->dev, "invalid dma direction\n"); - return NULL; - } - - /* calculate number of required entries */ - for_each_sg(sgl, sg, sg_len, i) - num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE); - - /* allocate enough room to accomodate the number of entries */ - async_desc = kzalloc(sizeof(*async_desc) + - (num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT); - - if (!async_desc) - goto err_out; - - if (flags & DMA_PREP_FENCE) - async_desc->flags |= DESC_FLAG_NWD; - - if (flags & DMA_PREP_INTERRUPT) - async_desc->flags |= DESC_FLAG_EOT; - else - async_desc->flags |= DESC_FLAG_INT; - - async_desc->num_desc = num_alloc; - async_desc->curr_desc = async_desc->desc; - async_desc->dir = direction; - - /* fill in temporary descriptors */ - desc = async_desc->desc; - for_each_sg(sgl, sg, sg_len, i) { - unsigned int remainder = sg_dma_len(sg); - unsigned int curr_offset = 0; - - do { - desc->addr = cpu_to_le32(sg_dma_address(sg) + - curr_offset); - - if (remainder > BAM_MAX_DATA_SIZE) { - desc->size = cpu_to_le16(BAM_MAX_DATA_SIZE); - remainder -= BAM_MAX_DATA_SIZE; - curr_offset += BAM_MAX_DATA_SIZE; - } else { - desc->size = cpu_to_le16(remainder); - remainder = 0; - } - - async_desc->length += desc->size; - desc++; - } while (remainder > 0); - } - - return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags); - -err_out: - kfree(async_desc); - return NULL; -} - -/** - * bam_dma_terminate_all - terminate all transactions on a channel - * @bchan: bam dma channel - * - * Dequeues and frees all transactions - * No callbacks are done - * - */ -static int bam_dma_terminate_all(struct dma_chan *chan) -{ - struct bam_chan *bchan = to_bam_chan(chan); - unsigned long flag; - LIST_HEAD(head); - - /* remove all transactions, including active transaction */ - spin_lock_irqsave(&bchan->vc.lock, flag); - if (bchan->curr_txd) { - list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued); - bchan->curr_txd = NULL; - } - - vchan_get_all_descriptors(&bchan->vc, &head); - spin_unlock_irqrestore(&bchan->vc.lock, flag); - - vchan_dma_desc_free_list(&bchan->vc, &head); - - return 0; -} - -/** - * bam_pause - Pause DMA channel - * @chan: dma channel - * - */ -static int bam_pause(struct dma_chan *chan) -{ - struct bam_chan *bchan = to_bam_chan(chan); - struct bam_device *bdev = bchan->bdev; - unsigned long flag; - - spin_lock_irqsave(&bchan->vc.lock, flag); - writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT)); - bchan->paused = 1; - spin_unlock_irqrestore(&bchan->vc.lock, flag); - - return 0; -} - -/** - * bam_resume - Resume DMA channel operations - * @chan: dma channel - * - */ -static int bam_resume(struct dma_chan *chan) -{ - struct bam_chan *bchan = to_bam_chan(chan); - struct bam_device *bdev = bchan->bdev; - unsigned long flag; - - spin_lock_irqsave(&bchan->vc.lock, flag); - writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT)); - bchan->paused = 0; - spin_unlock_irqrestore(&bchan->vc.lock, flag); - - return 0; -} - -/** - * process_channel_irqs - processes the channel interrupts - * @bdev: bam controller - * - * This function processes the channel interrupts - * - */ -static u32 
process_channel_irqs(struct bam_device *bdev) -{ - u32 i, srcs, pipe_stts; - unsigned long flags; - struct bam_async_desc *async_desc; - - srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE)); - - /* return early if no pipe/channel interrupts are present */ - if (!(srcs & P_IRQ)) - return srcs; - - for (i = 0; i < bdev->num_channels; i++) { - struct bam_chan *bchan = &bdev->channels[i]; - - if (!(srcs & BIT(i))) - continue; - - /* clear pipe irq */ - pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS)); - - writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR)); - - spin_lock_irqsave(&bchan->vc.lock, flags); - async_desc = bchan->curr_txd; - - if (async_desc) { - async_desc->num_desc -= async_desc->xfer_len; - async_desc->curr_desc += async_desc->xfer_len; - bchan->curr_txd = NULL; - - /* manage FIFO */ - bchan->head += async_desc->xfer_len; - bchan->head %= MAX_DESCRIPTORS; - - /* - * if complete, process cookie. Otherwise - * push back to front of desc_issued so that - * it gets restarted by the tasklet - */ - if (!async_desc->num_desc) - vchan_cookie_complete(&async_desc->vd); - else - list_add(&async_desc->vd.node, - &bchan->vc.desc_issued); - } - - spin_unlock_irqrestore(&bchan->vc.lock, flags); - } - - return srcs; -} - -/** - * bam_dma_irq - irq handler for bam controller - * @irq: IRQ of interrupt - * @data: callback data - * - * IRQ handler for the bam controller - */ -static irqreturn_t bam_dma_irq(int irq, void *data) -{ - struct bam_device *bdev = data; - u32 clr_mask = 0, srcs = 0; - - srcs |= process_channel_irqs(bdev); - - /* kick off tasklet to start next dma transfer */ - if (srcs & P_IRQ) - tasklet_schedule(&bdev->task); - - if (srcs & BAM_IRQ) - clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS)); - - /* don't allow reorder of the various accesses to the BAM registers */ - mb(); - - writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR)); - - return IRQ_HANDLED; -} - -/** - * bam_tx_status - returns status of transaction - * @chan: dma channel - * @cookie: transaction cookie - * @txstate: DMA transaction state - * - * Return status of dma transaction - */ -static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie, - struct dma_tx_state *txstate) -{ - struct bam_chan *bchan = to_bam_chan(chan); - struct virt_dma_desc *vd; - int ret; - size_t residue = 0; - unsigned int i; - unsigned long flags; - - ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_COMPLETE) - return ret; - - if (!txstate) - return bchan->paused ? 
DMA_PAUSED : ret; - - spin_lock_irqsave(&bchan->vc.lock, flags); - vd = vchan_find_desc(&bchan->vc, cookie); - if (vd) - residue = container_of(vd, struct bam_async_desc, vd)->length; - else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie) - for (i = 0; i < bchan->curr_txd->num_desc; i++) - residue += bchan->curr_txd->curr_desc[i].size; - - spin_unlock_irqrestore(&bchan->vc.lock, flags); - - dma_set_residue(txstate, residue); - - if (ret == DMA_IN_PROGRESS && bchan->paused) - ret = DMA_PAUSED; - - return ret; -} - -/** - * bam_apply_new_config - * @bchan: bam dma channel - * @dir: DMA direction - */ -static void bam_apply_new_config(struct bam_chan *bchan, - enum dma_transfer_direction dir) -{ - struct bam_device *bdev = bchan->bdev; - u32 maxburst; - - if (dir == DMA_DEV_TO_MEM) - maxburst = bchan->slave.src_maxburst; - else - maxburst = bchan->slave.dst_maxburst; - - writel_relaxed(maxburst, bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); - - bchan->reconfigure = 0; -} - -/** - * bam_start_dma - start next transaction - * @bchan - bam dma channel - */ -static void bam_start_dma(struct bam_chan *bchan) -{ - struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc); - struct bam_device *bdev = bchan->bdev; - struct bam_async_desc *async_desc; - struct bam_desc_hw *desc; - struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt, - sizeof(struct bam_desc_hw)); - - lockdep_assert_held(&bchan->vc.lock); - - if (!vd) - return; - - list_del(&vd->node); - - async_desc = container_of(vd, struct bam_async_desc, vd); - bchan->curr_txd = async_desc; - - /* on first use, initialize the channel hardware */ - if (!bchan->initialized) - bam_chan_init_hw(bchan, async_desc->dir); - - /* apply new slave config changes, if necessary */ - if (bchan->reconfigure) - bam_apply_new_config(bchan, async_desc->dir); - - desc = bchan->curr_txd->curr_desc; - - if (async_desc->num_desc > MAX_DESCRIPTORS) - async_desc->xfer_len = MAX_DESCRIPTORS; - else - async_desc->xfer_len = async_desc->num_desc; - - /* set any special flags on the last descriptor */ - if (async_desc->num_desc == async_desc->xfer_len) - desc[async_desc->xfer_len - 1].flags = - cpu_to_le16(async_desc->flags); - else - desc[async_desc->xfer_len - 1].flags |= - cpu_to_le16(DESC_FLAG_INT); - - if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) { - u32 partial = MAX_DESCRIPTORS - bchan->tail; - - memcpy(&fifo[bchan->tail], desc, - partial * sizeof(struct bam_desc_hw)); - memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) * - sizeof(struct bam_desc_hw)); - } else { - memcpy(&fifo[bchan->tail], desc, - async_desc->xfer_len * sizeof(struct bam_desc_hw)); - } - - bchan->tail += async_desc->xfer_len; - bchan->tail %= MAX_DESCRIPTORS; - - /* ensure descriptor writes and dma start not reordered */ - wmb(); - writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw), - bam_addr(bdev, bchan->id, BAM_P_EVNT_REG)); -} - -/** - * dma_tasklet - DMA IRQ tasklet - * @data: tasklet argument (bam controller structure) - * - * Sets up next DMA operation and then processes all completed transactions - */ -static void dma_tasklet(unsigned long data) -{ - struct bam_device *bdev = (struct bam_device *)data; - struct bam_chan *bchan; - unsigned long flags; - unsigned int i; - - /* go through the channels and kick off transactions */ - for (i = 0; i < bdev->num_channels; i++) { - bchan = &bdev->channels[i]; - spin_lock_irqsave(&bchan->vc.lock, flags); - - if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd) - bam_start_dma(bchan); - 
spin_unlock_irqrestore(&bchan->vc.lock, flags); - } -} - -/** - * bam_issue_pending - starts pending transactions - * @chan: dma channel - * - * Calls tasklet directly which in turn starts any pending transactions - */ -static void bam_issue_pending(struct dma_chan *chan) -{ - struct bam_chan *bchan = to_bam_chan(chan); - unsigned long flags; - - spin_lock_irqsave(&bchan->vc.lock, flags); - - /* if work pending and idle, start a transaction */ - if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd) - bam_start_dma(bchan); - - spin_unlock_irqrestore(&bchan->vc.lock, flags); -} - -/** - * bam_dma_free_desc - free descriptor memory - * @vd: virtual descriptor - * - */ -static void bam_dma_free_desc(struct virt_dma_desc *vd) -{ - struct bam_async_desc *async_desc = container_of(vd, - struct bam_async_desc, vd); - - kfree(async_desc); -} - -static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec, - struct of_dma *of) -{ - struct bam_device *bdev = container_of(of->of_dma_data, - struct bam_device, common); - unsigned int request; - - if (dma_spec->args_count != 1) - return NULL; - - request = dma_spec->args[0]; - if (request >= bdev->num_channels) - return NULL; - - return dma_get_slave_channel(&(bdev->channels[request].vc.chan)); -} - -/** - * bam_init - * @bdev: bam device - * - * Initialization helper for global bam registers - */ -static int bam_init(struct bam_device *bdev) -{ - u32 val; - - /* read revision and configuration information */ - val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT; - val &= NUM_EES_MASK; - - /* check that configured EE is within range */ - if (bdev->ee >= val) - return -EINVAL; - - val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES)); - bdev->num_channels = val & BAM_NUM_PIPES_MASK; - - /* s/w reset bam */ - /* after reset all pipes are disabled and idle */ - val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL)); - val |= BAM_SW_RST; - writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); - val &= ~BAM_SW_RST; - writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); - - /* make sure previous stores are visible before enabling BAM */ - wmb(); - - /* enable bam */ - val |= BAM_EN; - writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); - - /* set descriptor threshhold, start with 4 bytes */ - writel_relaxed(DEFAULT_CNT_THRSHLD, - bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); - - /* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */ - writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS)); - - /* enable irqs for errors */ - writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN, - bam_addr(bdev, 0, BAM_IRQ_EN)); - - /* unmask global bam interrupt */ - writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); - - return 0; -} - -static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan, - u32 index) -{ - bchan->id = index; - bchan->bdev = bdev; - - vchan_init(&bchan->vc, &bdev->common); - bchan->vc.desc_free = bam_dma_free_desc; -} - -static const struct of_device_id bam_of_match[] = { - { .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info }, - { .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info }, - { .compatible = "qcom,bam-v1.7.0", .data = &bam_v1_7_reg_info }, - {} -}; - -MODULE_DEVICE_TABLE(of, bam_of_match); - -static int bam_dma_probe(struct platform_device *pdev) -{ - struct bam_device *bdev; - const struct of_device_id *match; - struct resource *iores; - int ret, i; - - bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL); - if (!bdev) - return -ENOMEM; - - 
bdev->dev = &pdev->dev; - - match = of_match_node(bam_of_match, pdev->dev.of_node); - if (!match) { - dev_err(&pdev->dev, "Unsupported BAM module\n"); - return -ENODEV; - } - - bdev->layout = match->data; - - iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); - bdev->regs = devm_ioremap_resource(&pdev->dev, iores); - if (IS_ERR(bdev->regs)) - return PTR_ERR(bdev->regs); - - bdev->irq = platform_get_irq(pdev, 0); - if (bdev->irq < 0) - return bdev->irq; - - ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee); - if (ret) { - dev_err(bdev->dev, "Execution environment unspecified\n"); - return ret; - } - - bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk"); - if (IS_ERR(bdev->bamclk)) - return PTR_ERR(bdev->bamclk); - - ret = clk_prepare_enable(bdev->bamclk); - if (ret) { - dev_err(bdev->dev, "failed to prepare/enable clock\n"); - return ret; - } - - ret = bam_init(bdev); - if (ret) - goto err_disable_clk; - - tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev); - - bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels, - sizeof(*bdev->channels), GFP_KERNEL); - - if (!bdev->channels) { - ret = -ENOMEM; - goto err_tasklet_kill; - } - - /* allocate and initialize channels */ - INIT_LIST_HEAD(&bdev->common.channels); - - for (i = 0; i < bdev->num_channels; i++) - bam_channel_init(bdev, &bdev->channels[i], i); - - ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq, - IRQF_TRIGGER_HIGH, "bam_dma", bdev); - if (ret) - goto err_bam_channel_exit; - - /* set max dma segment size */ - bdev->common.dev = bdev->dev; - bdev->common.dev->dma_parms = &bdev->dma_parms; - ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE); - if (ret) { - dev_err(bdev->dev, "cannot set maximum segment size\n"); - goto err_bam_channel_exit; - } - - platform_set_drvdata(pdev, bdev); - - /* set capabilities */ - dma_cap_zero(bdev->common.cap_mask); - dma_cap_set(DMA_SLAVE, bdev->common.cap_mask); - - /* initialize dmaengine apis */ - bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; - bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES; - bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES; - bdev->common.device_alloc_chan_resources = bam_alloc_chan; - bdev->common.device_free_chan_resources = bam_free_chan; - bdev->common.device_prep_slave_sg = bam_prep_slave_sg; - bdev->common.device_config = bam_slave_config; - bdev->common.device_pause = bam_pause; - bdev->common.device_resume = bam_resume; - bdev->common.device_terminate_all = bam_dma_terminate_all; - bdev->common.device_issue_pending = bam_issue_pending; - bdev->common.device_tx_status = bam_tx_status; - bdev->common.dev = bdev->dev; - - ret = dma_async_device_register(&bdev->common); - if (ret) { - dev_err(bdev->dev, "failed to register dma async device\n"); - goto err_bam_channel_exit; - } - - ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate, - &bdev->common); - if (ret) - goto err_unregister_dma; - - return 0; - -err_unregister_dma: - dma_async_device_unregister(&bdev->common); -err_bam_channel_exit: - for (i = 0; i < bdev->num_channels; i++) - tasklet_kill(&bdev->channels[i].vc.task); -err_tasklet_kill: - tasklet_kill(&bdev->task); -err_disable_clk: - clk_disable_unprepare(bdev->bamclk); - - return ret; -} - -static int bam_dma_remove(struct platform_device *pdev) -{ - struct bam_device *bdev = platform_get_drvdata(pdev); - u32 i; - - of_dma_controller_free(pdev->dev.of_node); - 
dma_async_device_unregister(&bdev->common); - - /* mask all interrupts for this execution environment */ - writel_relaxed(0, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); - - devm_free_irq(bdev->dev, bdev->irq, bdev); - - for (i = 0; i < bdev->num_channels; i++) { - bam_dma_terminate_all(&bdev->channels[i].vc.chan); - tasklet_kill(&bdev->channels[i].vc.task); - - dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, - bdev->channels[i].fifo_virt, - bdev->channels[i].fifo_phys); - } - - tasklet_kill(&bdev->task); - - clk_disable_unprepare(bdev->bamclk); - - return 0; -} - -static struct platform_driver bam_dma_driver = { - .probe = bam_dma_probe, - .remove = bam_dma_remove, - .driver = { - .name = "bam-dma-engine", - .of_match_table = bam_of_match, - }, -}; - -module_platform_driver(bam_dma_driver); - -MODULE_AUTHOR("Andy Gross "); -MODULE_DESCRIPTION("QCOM BAM DMA engine driver"); -MODULE_LICENSE("GPL v2"); -- cgit v0.10.2 From 858de34c0a31b9438eeab2ae0465c20c68bf63f0 Mon Sep 17 00:00:00 2001 From: Sinan Kaya Date: Thu, 4 Feb 2016 23:34:33 -0500 Subject: dmaengine: hidma: Add Device Tree binding Add documentation for the Qualcomm Technologies HIDMA binding. Signed-off-by: Sinan Kaya Acked-by: Rob Herring Signed-off-by: Vinod Koul diff --git a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt new file mode 100644 index 0000000..fd5618b --- /dev/null +++ b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt @@ -0,0 +1,89 @@ +Qualcomm Technologies HIDMA Management interface + +Qualcomm Technologies HIDMA is a high-speed DMA device. It only supports +memcpy and memset capabilities. It has been designed for virtualized +environments. + +Each HIDMA HW instance consists of multiple DMA channels. These channels +share the same bandwidth. The bandwidth utilization can be partitioned +among channels based on the priority and weight assignments. + +There are only two priority levels and 15 weight assignments possible. + +Other parameters here determine how much of the system bus this HIDMA +instance can use, such as the maximum read/write request size and the number of bytes to +read/write in a single burst. + +Main node required properties: +- compatible: "qcom,hidma-mgmt-1.0"; +- reg: Address range for DMA device +- dma-channels: Number of channels supported by this DMA controller. +- max-write-burst-bytes: Maximum write burst in bytes that HIDMA can + occupy the bus for in a single transaction. A memcpy request is + fragmented to multiples of this amount. This parameter is used while + writing into destination memory. Setting this value incorrectly can + starve other peripherals in the system. +- max-read-burst-bytes: Maximum read burst in bytes that HIDMA can + occupy the bus for in a single transaction. A memcpy request is + fragmented to multiples of this amount. This parameter is used while + reading the source memory. Setting this value incorrectly can starve + other peripherals in the system. +- max-write-transactions: This value is how many times a write burst is + applied back to back while writing to the destination before yielding + the bus. +- max-read-transactions: This value is how many times a read burst is + applied back to back while reading the source before yielding the bus. +- channel-reset-timeout-cycles: Channel reset timeout in cycles for this SOC. + Once a reset is applied to the HW, the HW starts a timer for the reset operation + to complete. If the reset does not complete within this time, the HW reports a reset + failure. 
+ +Sub-nodes: + +HIDMA has one or more DMA channels that are used to move data from one +memory location to another. + +When the OS is not in control of the management interface (i.e. it's a guest), +the channel nodes appear on their own, not under a management node. + +Required properties: +- compatible: must contain "qcom,hidma-1.0" +- reg: Addresses for the transfer and event channel +- interrupts: Should contain the event interrupt +- desc-count: Number of asynchronous requests this channel can handle +- iommus: required a iommu node + +Example: + +Hypervisor OS configuration: + + hidma-mgmt@f9984000 = { + compatible = "qcom,hidma-mgmt-1.0"; + reg = <0xf9984000 0x15000>; + dma-channels = <6>; + max-write-burst-bytes = <1024>; + max-read-burst-bytes = <1024>; + max-write-transactions = <31>; + max-read-transactions = <31>; + channel-reset-timeout-cycles = <0x500>; + + hidma_24: dma-controller@0x5c050000 { + compatible = "qcom,hidma-1.0"; + reg = <0 0x5c050000 0x0 0x1000>, + <0 0x5c0b0000 0x0 0x1000>; + interrupts = <0 389 0>; + desc-count = <10>; + iommus = <&system_mmu>; + }; + }; + +Guest OS configuration: + + hidma_24: dma-controller@0x5c050000 { + compatible = "qcom,hidma-1.0"; + reg = <0 0x5c050000 0x0 0x1000>, + <0 0x5c0b0000 0x0 0x1000>; + interrupts = <0 389 0>; + desc-count = <10>; + iommus = <&system_mmu>; + }; -- cgit v0.10.2 From 7f8f209fd6e09a07fd1820144452caba419cf2b4 Mon Sep 17 00:00:00 2001 From: Sinan Kaya Date: Thu, 4 Feb 2016 23:34:34 -0500 Subject: dmaengine: add Qualcomm Technologies HIDMA management driver The Qualcomm Technologies HIDMA device has been designed to support virtualization technology. The driver has been divided into two to follow the hardware design. 1. HIDMA Management driver 2. HIDMA Channel driver Each HIDMA HW consists of multiple channels. These channels share some set of common parameters. These parameters are initialized by the management driver during power up. Same management driver is used for monitoring the execution of the channels. Management driver can change the performance behavior dynamically such as bandwidth allocation and prioritization. The management driver is executed in host context and is the main management entity for all channels provided by the device. Signed-off-by: Sinan Kaya Reviewed-by: Andy Shevchenko Signed-off-by: Vinod Koul diff --git a/Documentation/ABI/testing/sysfs-platform-hidma-mgmt b/Documentation/ABI/testing/sysfs-platform-hidma-mgmt new file mode 100644 index 0000000..c2fb5d0 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-platform-hidma-mgmt @@ -0,0 +1,97 @@ +What: /sys/devices/platform/hidma-mgmt*/chanops/chan*/priority + /sys/devices/platform/QCOM8060:*/chanops/chan*/priority +Date: Nov 2015 +KernelVersion: 4.4 +Contact: "Sinan Kaya " +Description: + Contains either 0 or 1 and indicates if the DMA channel is a + low priority (0) or high priority (1) channel. + +What: /sys/devices/platform/hidma-mgmt*/chanops/chan*/weight + /sys/devices/platform/QCOM8060:*/chanops/chan*/weight +Date: Nov 2015 +KernelVersion: 4.4 +Contact: "Sinan Kaya " +Description: + Contains 0..15 and indicates the weight of the channel among + equal priority channels during round robin scheduling. + +What: /sys/devices/platform/hidma-mgmt*/chreset_timeout_cycles + /sys/devices/platform/QCOM8060:*/chreset_timeout_cycles +Date: Nov 2015 +KernelVersion: 4.4 +Contact: "Sinan Kaya " +Description: + Contains the platform specific cycle value to wait after a + reset command is issued. 
If the value is chosen too short, + then the HW will issue a reset failure interrupt. The value + is platform specific and should not be changed without + consultance. + +What: /sys/devices/platform/hidma-mgmt*/dma_channels + /sys/devices/platform/QCOM8060:*/dma_channels +Date: Nov 2015 +KernelVersion: 4.4 +Contact: "Sinan Kaya " +Description: + Contains the number of dma channels supported by one instance + of HIDMA hardware. The value may change from chip to chip. + +What: /sys/devices/platform/hidma-mgmt*/hw_version_major + /sys/devices/platform/QCOM8060:*/hw_version_major +Date: Nov 2015 +KernelVersion: 4.4 +Contact: "Sinan Kaya " +Description: + Version number major for the hardware. + +What: /sys/devices/platform/hidma-mgmt*/hw_version_minor + /sys/devices/platform/QCOM8060:*/hw_version_minor +Date: Nov 2015 +KernelVersion: 4.4 +Contact: "Sinan Kaya " +Description: + Version number minor for the hardware. + +What: /sys/devices/platform/hidma-mgmt*/max_rd_xactions + /sys/devices/platform/QCOM8060:*/max_rd_xactions +Date: Nov 2015 +KernelVersion: 4.4 +Contact: "Sinan Kaya " +Description: + Contains a value between 0 and 31. Maximum number of + read transactions that can be issued back to back. + Choosing a higher number gives better performance but + can also cause performance reduction to other peripherals + sharing the same bus. + +What: /sys/devices/platform/hidma-mgmt*/max_read_request + /sys/devices/platform/QCOM8060:*/max_read_request +Date: Nov 2015 +KernelVersion: 4.4 +Contact: "Sinan Kaya " +Description: + Size of each read request. The value needs to be a power + of two and can be between 128 and 1024. + +What: /sys/devices/platform/hidma-mgmt*/max_wr_xactions + /sys/devices/platform/QCOM8060:*/max_wr_xactions +Date: Nov 2015 +KernelVersion: 4.4 +Contact: "Sinan Kaya " +Description: + Contains a value between 0 and 31. Maximum number of + write transactions that can be issued back to back. + Choosing a higher number gives better performance but + can also cause performance reduction to other peripherals + sharing the same bus. + + +What: /sys/devices/platform/hidma-mgmt*/max_write_request + /sys/devices/platform/QCOM8060:*/max_write_request +Date: Nov 2015 +KernelVersion: 4.4 +Contact: "Sinan Kaya " +Description: + Size of each write request. The value needs to be a power + of two and can be between 128 and 1024. diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig index f17c272..c975b11 100644 --- a/drivers/dma/qcom/Kconfig +++ b/drivers/dma/qcom/Kconfig @@ -6,3 +6,14 @@ config QCOM_BAM_DMA ---help--- Enable support for the QCOM BAM DMA controller. This controller provides DMA capabilities for a variety of on-chip devices. + +config QCOM_HIDMA_MGMT + tristate "Qualcomm Technologies HIDMA Management support" + select DMA_ENGINE + help + Enable support for the Qualcomm Technologies HIDMA Management. + Each DMA device requires one management interface driver + for basic initialization before QCOM_HIDMA channel driver can + start managing the channels. In a virtualized environment, + the guest OS would run QCOM_HIDMA channel driver and the + host would run the QCOM_HIDMA_MGMT management driver. 
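As an illustration of the management interface documented above, a minimal userspace sketch that adjusts one channel's weight through sysfs. The instance name "hidma-mgmt.0" and channel directory "chan0" are hypothetical examples of the hidma-mgmt*/chanops/chan*/weight pattern from the ABI document; on ACPI systems the QCOM8060:* form applies instead, and writing requires root privileges.

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical instance/channel names; see the ABI document above */
		const char *path =
			"/sys/devices/platform/hidma-mgmt.0/chanops/chan0/weight";
		FILE *f = fopen(path, "w");

		if (!f) {
			perror(path);
			return 1;
		}

		/* valid weights are 0..15; the driver raises a weight of 0 to 1 */
		fprintf(f, "10\n");
		return fclose(f) ? 1 : 0;
	}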
diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile index f612ae3..bfea699 100644 --- a/drivers/dma/qcom/Makefile +++ b/drivers/dma/qcom/Makefile @@ -1 +1,3 @@ obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o +obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o +hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c new file mode 100644 index 0000000..ef491b8 --- /dev/null +++ b/drivers/dma/qcom/hidma_mgmt.c @@ -0,0 +1,302 @@ +/* + * Qualcomm Technologies HIDMA DMA engine Management interface + * + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hidma_mgmt.h" + +#define HIDMA_QOS_N_OFFSET 0x300 +#define HIDMA_CFG_OFFSET 0x400 +#define HIDMA_MAX_BUS_REQ_LEN_OFFSET 0x41C +#define HIDMA_MAX_XACTIONS_OFFSET 0x420 +#define HIDMA_HW_VERSION_OFFSET 0x424 +#define HIDMA_CHRESET_TIMEOUT_OFFSET 0x418 + +#define HIDMA_MAX_WR_XACTIONS_MASK GENMASK(4, 0) +#define HIDMA_MAX_RD_XACTIONS_MASK GENMASK(4, 0) +#define HIDMA_WEIGHT_MASK GENMASK(6, 0) +#define HIDMA_MAX_BUS_REQ_LEN_MASK GENMASK(15, 0) +#define HIDMA_CHRESET_TIMEOUT_MASK GENMASK(19, 0) + +#define HIDMA_MAX_WR_XACTIONS_BIT_POS 16 +#define HIDMA_MAX_BUS_WR_REQ_BIT_POS 16 +#define HIDMA_WRR_BIT_POS 8 +#define HIDMA_PRIORITY_BIT_POS 15 + +#define HIDMA_AUTOSUSPEND_TIMEOUT 2000 +#define HIDMA_MAX_CHANNEL_WEIGHT 15 + +int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev) +{ + unsigned int i; + u32 val; + + if (!is_power_of_2(mgmtdev->max_write_request) || + (mgmtdev->max_write_request < 128) || + (mgmtdev->max_write_request > 1024)) { + dev_err(&mgmtdev->pdev->dev, "invalid write request %d\n", + mgmtdev->max_write_request); + return -EINVAL; + } + + if (!is_power_of_2(mgmtdev->max_read_request) || + (mgmtdev->max_read_request < 128) || + (mgmtdev->max_read_request > 1024)) { + dev_err(&mgmtdev->pdev->dev, "invalid read request %d\n", + mgmtdev->max_read_request); + return -EINVAL; + } + + if (mgmtdev->max_wr_xactions > HIDMA_MAX_WR_XACTIONS_MASK) { + dev_err(&mgmtdev->pdev->dev, + "max_wr_xactions cannot be bigger than %ld\n", + HIDMA_MAX_WR_XACTIONS_MASK); + return -EINVAL; + } + + if (mgmtdev->max_rd_xactions > HIDMA_MAX_RD_XACTIONS_MASK) { + dev_err(&mgmtdev->pdev->dev, + "max_rd_xactions cannot be bigger than %ld\n", + HIDMA_MAX_RD_XACTIONS_MASK); + return -EINVAL; + } + + for (i = 0; i < mgmtdev->dma_channels; i++) { + if (mgmtdev->priority[i] > 1) { + dev_err(&mgmtdev->pdev->dev, + "priority can be 0 or 1\n"); + return -EINVAL; + } + + if (mgmtdev->weight[i] > HIDMA_MAX_CHANNEL_WEIGHT) { + dev_err(&mgmtdev->pdev->dev, + "max value of weight can be %d.\n", + HIDMA_MAX_CHANNEL_WEIGHT); + return -EINVAL; + } + + /* weight needs to be at least one */ + if (mgmtdev->weight[i] == 0) + mgmtdev->weight[i] = 1; + } + + pm_runtime_get_sync(&mgmtdev->pdev->dev); + val = readl(mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET); + val &= ~(HIDMA_MAX_BUS_REQ_LEN_MASK << 
HIDMA_MAX_BUS_WR_REQ_BIT_POS); + val |= mgmtdev->max_write_request << HIDMA_MAX_BUS_WR_REQ_BIT_POS; + val &= ~HIDMA_MAX_BUS_REQ_LEN_MASK; + val |= mgmtdev->max_read_request; + writel(val, mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET); + + val = readl(mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET); + val &= ~(HIDMA_MAX_WR_XACTIONS_MASK << HIDMA_MAX_WR_XACTIONS_BIT_POS); + val |= mgmtdev->max_wr_xactions << HIDMA_MAX_WR_XACTIONS_BIT_POS; + val &= ~HIDMA_MAX_RD_XACTIONS_MASK; + val |= mgmtdev->max_rd_xactions; + writel(val, mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET); + + mgmtdev->hw_version = + readl(mgmtdev->virtaddr + HIDMA_HW_VERSION_OFFSET); + mgmtdev->hw_version_major = (mgmtdev->hw_version >> 28) & 0xF; + mgmtdev->hw_version_minor = (mgmtdev->hw_version >> 16) & 0xF; + + for (i = 0; i < mgmtdev->dma_channels; i++) { + u32 weight = mgmtdev->weight[i]; + u32 priority = mgmtdev->priority[i]; + + val = readl(mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i)); + val &= ~(1 << HIDMA_PRIORITY_BIT_POS); + val |= (priority & 0x1) << HIDMA_PRIORITY_BIT_POS; + val &= ~(HIDMA_WEIGHT_MASK << HIDMA_WRR_BIT_POS); + val |= (weight & HIDMA_WEIGHT_MASK) << HIDMA_WRR_BIT_POS; + writel(val, mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i)); + } + + val = readl(mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET); + val &= ~HIDMA_CHRESET_TIMEOUT_MASK; + val |= mgmtdev->chreset_timeout_cycles & HIDMA_CHRESET_TIMEOUT_MASK; + writel(val, mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET); + + pm_runtime_mark_last_busy(&mgmtdev->pdev->dev); + pm_runtime_put_autosuspend(&mgmtdev->pdev->dev); + return 0; +} +EXPORT_SYMBOL_GPL(hidma_mgmt_setup); + +static int hidma_mgmt_probe(struct platform_device *pdev) +{ + struct hidma_mgmt_dev *mgmtdev; + struct resource *res; + void __iomem *virtaddr; + int irq; + int rc; + u32 val; + + pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + virtaddr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(virtaddr)) { + rc = -ENOMEM; + goto out; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "irq resources not found\n"); + rc = irq; + goto out; + } + + mgmtdev = devm_kzalloc(&pdev->dev, sizeof(*mgmtdev), GFP_KERNEL); + if (!mgmtdev) { + rc = -ENOMEM; + goto out; + } + + mgmtdev->pdev = pdev; + mgmtdev->addrsize = resource_size(res); + mgmtdev->virtaddr = virtaddr; + + rc = device_property_read_u32(&pdev->dev, "dma-channels", + &mgmtdev->dma_channels); + if (rc) { + dev_err(&pdev->dev, "number of channels missing\n"); + goto out; + } + + rc = device_property_read_u32(&pdev->dev, + "channel-reset-timeout-cycles", + &mgmtdev->chreset_timeout_cycles); + if (rc) { + dev_err(&pdev->dev, "channel reset timeout missing\n"); + goto out; + } + + rc = device_property_read_u32(&pdev->dev, "max-write-burst-bytes", + &mgmtdev->max_write_request); + if (rc) { + dev_err(&pdev->dev, "max-write-burst-bytes missing\n"); + goto out; + } + + rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes", + &mgmtdev->max_read_request); + if (rc) { + dev_err(&pdev->dev, "max-read-burst-bytes missing\n"); + goto out; + } + + rc = device_property_read_u32(&pdev->dev, "max-write-transactions", + &mgmtdev->max_wr_xactions); + if (rc) { + dev_err(&pdev->dev, "max-write-transactions missing\n"); + goto out; + } + + rc = 
device_property_read_u32(&pdev->dev, "max-read-transactions", + &mgmtdev->max_rd_xactions); + if (rc) { + dev_err(&pdev->dev, "max-read-transactions missing\n"); + goto out; + } + + mgmtdev->priority = devm_kcalloc(&pdev->dev, + mgmtdev->dma_channels, + sizeof(*mgmtdev->priority), + GFP_KERNEL); + if (!mgmtdev->priority) { + rc = -ENOMEM; + goto out; + } + + mgmtdev->weight = devm_kcalloc(&pdev->dev, + mgmtdev->dma_channels, + sizeof(*mgmtdev->weight), GFP_KERNEL); + if (!mgmtdev->weight) { + rc = -ENOMEM; + goto out; + } + + rc = hidma_mgmt_setup(mgmtdev); + if (rc) { + dev_err(&pdev->dev, "setup failed\n"); + goto out; + } + + /* start the HW */ + val = readl(mgmtdev->virtaddr + HIDMA_CFG_OFFSET); + val |= 1; + writel(val, mgmtdev->virtaddr + HIDMA_CFG_OFFSET); + + rc = hidma_mgmt_init_sys(mgmtdev); + if (rc) { + dev_err(&pdev->dev, "sysfs setup failed\n"); + goto out; + } + + dev_info(&pdev->dev, + "HW rev: %d.%d @ %pa with %d physical channels\n", + mgmtdev->hw_version_major, mgmtdev->hw_version_minor, + &res->start, mgmtdev->dma_channels); + + platform_set_drvdata(pdev, mgmtdev); + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + return 0; +out: + pm_runtime_put_sync_suspend(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return rc; +} + +#if IS_ENABLED(CONFIG_ACPI) +static const struct acpi_device_id hidma_mgmt_acpi_ids[] = { + {"QCOM8060"}, + {}, +}; +#endif + +static const struct of_device_id hidma_mgmt_match[] = { + {.compatible = "qcom,hidma-mgmt-1.0",}, + {}, +}; +MODULE_DEVICE_TABLE(of, hidma_mgmt_match); + +static struct platform_driver hidma_mgmt_driver = { + .probe = hidma_mgmt_probe, + .driver = { + .name = "hidma-mgmt", + .of_match_table = hidma_mgmt_match, + .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids), + }, +}; + +module_platform_driver(hidma_mgmt_driver); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/qcom/hidma_mgmt.h b/drivers/dma/qcom/hidma_mgmt.h new file mode 100644 index 0000000..f7daf33 --- /dev/null +++ b/drivers/dma/qcom/hidma_mgmt.h @@ -0,0 +1,39 @@ +/* + * Qualcomm Technologies HIDMA Management common header + * + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +struct hidma_mgmt_dev { + u8 hw_version_major; + u8 hw_version_minor; + + u32 max_wr_xactions; + u32 max_rd_xactions; + u32 max_write_request; + u32 max_read_request; + u32 dma_channels; + u32 chreset_timeout_cycles; + u32 hw_version; + u32 *priority; + u32 *weight; + + /* Hardware device constants */ + void __iomem *virtaddr; + resource_size_t addrsize; + + struct kobject **chroots; + struct platform_device *pdev; +}; + +int hidma_mgmt_init_sys(struct hidma_mgmt_dev *dev); +int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev); diff --git a/drivers/dma/qcom/hidma_mgmt_sys.c b/drivers/dma/qcom/hidma_mgmt_sys.c new file mode 100644 index 0000000..d61f106 --- /dev/null +++ b/drivers/dma/qcom/hidma_mgmt_sys.c @@ -0,0 +1,295 @@ +/* + * Qualcomm Technologies HIDMA Management SYS interface + * + * Copyright (c) 2015, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#include "hidma_mgmt.h" + +struct hidma_chan_attr { + struct hidma_mgmt_dev *mdev; + int index; + struct kobj_attribute attr; +}; + +struct hidma_mgmt_fileinfo { + char *name; + int mode; + int (*get)(struct hidma_mgmt_dev *mdev); + int (*set)(struct hidma_mgmt_dev *mdev, u64 val); +}; + +#define IMPLEMENT_GETSET(name) \ +static int get_##name(struct hidma_mgmt_dev *mdev) \ +{ \ + return mdev->name; \ +} \ +static int set_##name(struct hidma_mgmt_dev *mdev, u64 val) \ +{ \ + u64 tmp; \ + int rc; \ + \ + tmp = mdev->name; \ + mdev->name = val; \ + rc = hidma_mgmt_setup(mdev); \ + if (rc) \ + mdev->name = tmp; \ + return rc; \ +} + +#define DECLARE_ATTRIBUTE(name, mode) \ + {#name, mode, get_##name, set_##name} + +IMPLEMENT_GETSET(hw_version_major) +IMPLEMENT_GETSET(hw_version_minor) +IMPLEMENT_GETSET(max_wr_xactions) +IMPLEMENT_GETSET(max_rd_xactions) +IMPLEMENT_GETSET(max_write_request) +IMPLEMENT_GETSET(max_read_request) +IMPLEMENT_GETSET(dma_channels) +IMPLEMENT_GETSET(chreset_timeout_cycles) + +static int set_priority(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val) +{ + u64 tmp; + int rc; + + if (i >= mdev->dma_channels) + return -EINVAL; + + tmp = mdev->priority[i]; + mdev->priority[i] = val; + rc = hidma_mgmt_setup(mdev); + if (rc) + mdev->priority[i] = tmp; + return rc; +} + +static int set_weight(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val) +{ + u64 tmp; + int rc; + + if (i >= mdev->dma_channels) + return -EINVAL; + + tmp = mdev->weight[i]; + mdev->weight[i] = val; + rc = hidma_mgmt_setup(mdev); + if (rc) + mdev->weight[i] = tmp; + return rc; +} + +static struct hidma_mgmt_fileinfo hidma_mgmt_files[] = { + DECLARE_ATTRIBUTE(hw_version_major, S_IRUGO), + DECLARE_ATTRIBUTE(hw_version_minor, S_IRUGO), + DECLARE_ATTRIBUTE(dma_channels, S_IRUGO), + DECLARE_ATTRIBUTE(chreset_timeout_cycles, S_IRUGO), + DECLARE_ATTRIBUTE(max_wr_xactions, S_IRUGO), + DECLARE_ATTRIBUTE(max_rd_xactions, S_IRUGO), + DECLARE_ATTRIBUTE(max_write_request, S_IRUGO), + DECLARE_ATTRIBUTE(max_read_request, S_IRUGO), +}; + +static ssize_t show_values(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev); + unsigned int i; + + buf[0] = 0; + + for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) { + if (strcmp(attr->attr.name, hidma_mgmt_files[i].name) == 0) { + sprintf(buf, "%d\n", hidma_mgmt_files[i].get(mdev)); + break; + } + } + return strlen(buf); +} + +static ssize_t set_values(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = to_platform_device(dev); + struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev); + unsigned long tmp; + unsigned int i; + int rc; + + rc = kstrtoul(buf, 0, &tmp); + if (rc) + return rc; + + for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) { + if (strcmp(attr->attr.name, hidma_mgmt_files[i].name) == 0) { + rc = hidma_mgmt_files[i].set(mdev, tmp); + if (rc) + return rc; + + break; + } + } + 
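+ /* the write is reported as consumed even when no attribute name matched */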
return count; +} + +static ssize_t show_values_channel(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct hidma_chan_attr *chattr; + struct hidma_mgmt_dev *mdev; + + buf[0] = 0; + chattr = container_of(attr, struct hidma_chan_attr, attr); + mdev = chattr->mdev; + if (strcmp(attr->attr.name, "priority") == 0) + sprintf(buf, "%d\n", mdev->priority[chattr->index]); + else if (strcmp(attr->attr.name, "weight") == 0) + sprintf(buf, "%d\n", mdev->weight[chattr->index]); + + return strlen(buf); +} + +static ssize_t set_values_channel(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, + size_t count) +{ + struct hidma_chan_attr *chattr; + struct hidma_mgmt_dev *mdev; + unsigned long tmp; + int rc; + + chattr = container_of(attr, struct hidma_chan_attr, attr); + mdev = chattr->mdev; + + rc = kstrtoul(buf, 0, &tmp); + if (rc) + return rc; + + if (strcmp(attr->attr.name, "priority") == 0) { + rc = set_priority(mdev, chattr->index, tmp); + if (rc) + return rc; + } else if (strcmp(attr->attr.name, "weight") == 0) { + rc = set_weight(mdev, chattr->index, tmp); + if (rc) + return rc; + } + return count; +} + +static int create_sysfs_entry(struct hidma_mgmt_dev *dev, char *name, int mode) +{ + struct device_attribute *attrs; + char *name_copy; + + attrs = devm_kmalloc(&dev->pdev->dev, + sizeof(struct device_attribute), GFP_KERNEL); + if (!attrs) + return -ENOMEM; + + name_copy = devm_kstrdup(&dev->pdev->dev, name, GFP_KERNEL); + if (!name_copy) + return -ENOMEM; + + attrs->attr.name = name_copy; + attrs->attr.mode = mode; + attrs->show = show_values; + attrs->store = set_values; + sysfs_attr_init(&attrs->attr); + + return device_create_file(&dev->pdev->dev, attrs); +} + +static int create_sysfs_entry_channel(struct hidma_mgmt_dev *mdev, char *name, + int mode, int index, + struct kobject *parent) +{ + struct hidma_chan_attr *chattr; + char *name_copy; + + chattr = devm_kmalloc(&mdev->pdev->dev, sizeof(*chattr), GFP_KERNEL); + if (!chattr) + return -ENOMEM; + + name_copy = devm_kstrdup(&mdev->pdev->dev, name, GFP_KERNEL); + if (!name_copy) + return -ENOMEM; + + chattr->mdev = mdev; + chattr->index = index; + chattr->attr.attr.name = name_copy; + chattr->attr.attr.mode = mode; + chattr->attr.show = show_values_channel; + chattr->attr.store = set_values_channel; + sysfs_attr_init(&chattr->attr.attr); + + return sysfs_create_file(parent, &chattr->attr.attr); +} + +int hidma_mgmt_init_sys(struct hidma_mgmt_dev *mdev) +{ + unsigned int i; + int rc; + int required; + struct kobject *chanops; + + required = sizeof(*mdev->chroots) * mdev->dma_channels; + mdev->chroots = devm_kmalloc(&mdev->pdev->dev, required, GFP_KERNEL); + if (!mdev->chroots) + return -ENOMEM; + + chanops = kobject_create_and_add("chanops", &mdev->pdev->dev.kobj); + if (!chanops) + return -ENOMEM; + + /* create each channel directory here */ + for (i = 0; i < mdev->dma_channels; i++) { + char name[20]; + + snprintf(name, sizeof(name), "chan%d", i); + mdev->chroots[i] = kobject_create_and_add(name, chanops); + if (!mdev->chroots[i]) + return -ENOMEM; + } + + /* populate common parameters */ + for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) { + rc = create_sysfs_entry(mdev, hidma_mgmt_files[i].name, + hidma_mgmt_files[i].mode); + if (rc) + return rc; + } + + /* populate parameters that are per channel */ + for (i = 0; i < mdev->dma_channels; i++) { + rc = create_sysfs_entry_channel(mdev, "priority", + (S_IRUGO | S_IWUGO), i, + mdev->chroots[i]); + if (rc) + return rc; + + rc = 
create_sysfs_entry_channel(mdev, "weight", + (S_IRUGO | S_IWUGO), i, + mdev->chroots[i]); + if (rc) + return rc; + } + + return 0; +} +EXPORT_SYMBOL_GPL(hidma_mgmt_init_sys); -- cgit v0.10.2 From 67a2003e060739747cfa3ea9b0d88b3d321ebf24 Mon Sep 17 00:00:00 2001 From: Sinan Kaya Date: Thu, 4 Feb 2016 23:34:35 -0500 Subject: dmaengine: add Qualcomm Technologies HIDMA channel driver This patch adds support for the HIDMA engine. The driver consists of two logical blocks: the DMA engine interface and the low-level interface. The hardware only supports memcpy/memset and this driver only supports the memcpy interface. Neither the HW nor the driver supports the slave interface. Signed-off-by: Sinan Kaya Reviewed-by: Andy Shevchenko Signed-off-by: Vinod Koul diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig index c975b11..a7761c4 100644 --- a/drivers/dma/qcom/Kconfig +++ b/drivers/dma/qcom/Kconfig @@ -17,3 +17,13 @@ config QCOM_HIDMA_MGMT start managing the channels. In a virtualized environment, the guest OS would run QCOM_HIDMA channel driver and the host would run the QCOM_HIDMA_MGMT management driver. + +config QCOM_HIDMA + tristate "Qualcomm Technologies HIDMA Channel support" + select DMA_ENGINE + help + Enable support for the Qualcomm Technologies HIDMA controller. + The HIDMA controller supports optimized buffer copies + (user to kernel, kernel to kernel, etc.). It only supports the + memcpy interface. The core is not intended for general + purpose slave DMA. diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c new file mode 100644 index 0000000..cccc78e --- /dev/null +++ b/drivers/dma/qcom/hidma.c @@ -0,0 +1,706 @@ +/* + * Qualcomm Technologies HIDMA DMA engine interface + * + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008. + * Copyright (C) Semihalf 2009 + * Copyright (C) Ilya Yanok, Emcraft Systems 2010 + * Copyright (C) Alexander Popov, Promcontroller 2014 + * + * Written by Piotr Ziecik . Hardware description + * (defines, structures and comments) was taken from MPC5121 DMA driver + * written by Hongjun Chen . + * + * Approved as OSADL project by a majority of OSADL members and funded + * by OSADL membership fees in 2009; for details see www.osadl.org. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. + */ + +/* Linux Foundation elects GPLv2 license only. 
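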
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../dmaengine.h" +#include "hidma.h" + +/* + * Default idle time is 2 seconds. This parameter can + * be overridden by changing the following + * /sys/bus/platform/devices/QCOM8061:/power/autosuspend_delay_ms + * during kernel boot. + */ +#define HIDMA_AUTOSUSPEND_TIMEOUT 2000 +#define HIDMA_ERR_INFO_SW 0xFF +#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0 +#define HIDMA_NR_DEFAULT_DESC 10 + +static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev) +{ + return container_of(dmadev, struct hidma_dev, ddev); +} + +static inline +struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp) +{ + return container_of(_lldevp, struct hidma_dev, lldev); +} + +static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach) +{ + return container_of(dmach, struct hidma_chan, chan); +} + +static inline +struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t) +{ + return container_of(t, struct hidma_desc, desc); +} + +static void hidma_free(struct hidma_dev *dmadev) +{ + INIT_LIST_HEAD(&dmadev->ddev.channels); +} + +static unsigned int nr_desc_prm; +module_param(nr_desc_prm, uint, 0644); +MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)"); + + +/* process completed descriptors */ +static void hidma_process_completed(struct hidma_chan *mchan) +{ + struct dma_device *ddev = mchan->chan.device; + struct hidma_dev *mdma = to_hidma_dev(ddev); + struct dma_async_tx_descriptor *desc; + dma_cookie_t last_cookie; + struct hidma_desc *mdesc; + unsigned long irqflags; + struct list_head list; + + INIT_LIST_HEAD(&list); + + /* Get all completed descriptors */ + spin_lock_irqsave(&mchan->lock, irqflags); + list_splice_tail_init(&mchan->completed, &list); + spin_unlock_irqrestore(&mchan->lock, irqflags); + + /* Execute callbacks and run dependencies */ + list_for_each_entry(mdesc, &list, node) { + enum dma_status llstat; + + desc = &mdesc->desc; + + spin_lock_irqsave(&mchan->lock, irqflags); + dma_cookie_complete(desc); + spin_unlock_irqrestore(&mchan->lock, irqflags); + + llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch); + if (desc->callback && (llstat == DMA_COMPLETE)) + desc->callback(desc->callback_param); + + last_cookie = desc->cookie; + dma_run_dependencies(desc); + } + + /* Free descriptors */ + spin_lock_irqsave(&mchan->lock, irqflags); + list_splice_tail_init(&list, &mchan->free); + spin_unlock_irqrestore(&mchan->lock, irqflags); + +} + +/* + * Called once for each submitted descriptor. + * PM is locked once for each descriptor that is currently + * in execution. 
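+ * The runtime PM reference taken in hidma_tx_submit() is released here once
+ * the descriptor has been moved to the completed list.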
+ */ +static void hidma_callback(void *data) +{ + struct hidma_desc *mdesc = data; + struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan); + struct dma_device *ddev = mchan->chan.device; + struct hidma_dev *dmadev = to_hidma_dev(ddev); + unsigned long irqflags; + bool queued = false; + + spin_lock_irqsave(&mchan->lock, irqflags); + if (mdesc->node.next) { + /* Delete from the active list, add to completed list */ + list_move_tail(&mdesc->node, &mchan->completed); + queued = true; + + /* calculate the next running descriptor */ + mchan->running = list_first_entry(&mchan->active, + struct hidma_desc, node); + } + spin_unlock_irqrestore(&mchan->lock, irqflags); + + hidma_process_completed(mchan); + + if (queued) { + pm_runtime_mark_last_busy(dmadev->ddev.dev); + pm_runtime_put_autosuspend(dmadev->ddev.dev); + } +} + +static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig) +{ + struct hidma_chan *mchan; + struct dma_device *ddev; + + mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL); + if (!mchan) + return -ENOMEM; + + ddev = &dmadev->ddev; + mchan->dma_sig = dma_sig; + mchan->dmadev = dmadev; + mchan->chan.device = ddev; + dma_cookie_init(&mchan->chan); + + INIT_LIST_HEAD(&mchan->free); + INIT_LIST_HEAD(&mchan->prepared); + INIT_LIST_HEAD(&mchan->active); + INIT_LIST_HEAD(&mchan->completed); + + spin_lock_init(&mchan->lock); + list_add_tail(&mchan->chan.device_node, &ddev->channels); + dmadev->ddev.chancnt++; + return 0; +} + +static void hidma_issue_task(unsigned long arg) +{ + struct hidma_dev *dmadev = (struct hidma_dev *)arg; + + pm_runtime_get_sync(dmadev->ddev.dev); + hidma_ll_start(dmadev->lldev); +} + +static void hidma_issue_pending(struct dma_chan *dmach) +{ + struct hidma_chan *mchan = to_hidma_chan(dmach); + struct hidma_dev *dmadev = mchan->dmadev; + unsigned long flags; + int status; + + spin_lock_irqsave(&mchan->lock, flags); + if (!mchan->running) { + struct hidma_desc *desc = list_first_entry(&mchan->active, + struct hidma_desc, + node); + mchan->running = desc; + } + spin_unlock_irqrestore(&mchan->lock, flags); + + /* PM will be released in hidma_callback function. */ + status = pm_runtime_get(dmadev->ddev.dev); + if (status < 0) + tasklet_schedule(&dmadev->task); + else + hidma_ll_start(dmadev->lldev); +} + +static enum dma_status hidma_tx_status(struct dma_chan *dmach, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct hidma_chan *mchan = to_hidma_chan(dmach); + enum dma_status ret; + + ret = dma_cookie_status(dmach, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + if (mchan->paused && (ret == DMA_IN_PROGRESS)) { + unsigned long flags; + dma_cookie_t runcookie; + + spin_lock_irqsave(&mchan->lock, flags); + if (mchan->running) + runcookie = mchan->running->desc.cookie; + else + runcookie = -EINVAL; + + if (runcookie == cookie) + ret = DMA_PAUSED; + + spin_unlock_irqrestore(&mchan->lock, flags); + } + + return ret; +} + +/* + * Submit descriptor to hardware. + * Lock the PM for each descriptor we are sending. 
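+ * The reference is dropped either from hidma_callback() when the transfer
+ * completes, or right below if the channel turns out not to be enabled.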
+ */ +static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd) +{ + struct hidma_chan *mchan = to_hidma_chan(txd->chan); + struct hidma_dev *dmadev = mchan->dmadev; + struct hidma_desc *mdesc; + unsigned long irqflags; + dma_cookie_t cookie; + + pm_runtime_get_sync(dmadev->ddev.dev); + if (!hidma_ll_isenabled(dmadev->lldev)) { + pm_runtime_mark_last_busy(dmadev->ddev.dev); + pm_runtime_put_autosuspend(dmadev->ddev.dev); + return -ENODEV; + } + + mdesc = container_of(txd, struct hidma_desc, desc); + spin_lock_irqsave(&mchan->lock, irqflags); + + /* Move descriptor to active */ + list_move_tail(&mdesc->node, &mchan->active); + + /* Update cookie */ + cookie = dma_cookie_assign(txd); + + hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch); + spin_unlock_irqrestore(&mchan->lock, irqflags); + + return cookie; +} + +static int hidma_alloc_chan_resources(struct dma_chan *dmach) +{ + struct hidma_chan *mchan = to_hidma_chan(dmach); + struct hidma_dev *dmadev = mchan->dmadev; + struct hidma_desc *mdesc, *tmp; + unsigned long irqflags; + LIST_HEAD(descs); + unsigned int i; + int rc = 0; + + if (mchan->allocated) + return 0; + + /* Alloc descriptors for this channel */ + for (i = 0; i < dmadev->nr_descriptors; i++) { + mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT); + if (!mdesc) { + rc = -ENOMEM; + break; + } + dma_async_tx_descriptor_init(&mdesc->desc, dmach); + mdesc->desc.tx_submit = hidma_tx_submit; + + rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig, + "DMA engine", hidma_callback, mdesc, + &mdesc->tre_ch); + if (rc) { + dev_err(dmach->device->dev, + "channel alloc failed at %u\n", i); + kfree(mdesc); + break; + } + list_add_tail(&mdesc->node, &descs); + } + + if (rc) { + /* return the allocated descriptors */ + list_for_each_entry_safe(mdesc, tmp, &descs, node) { + hidma_ll_free(dmadev->lldev, mdesc->tre_ch); + kfree(mdesc); + } + return rc; + } + + spin_lock_irqsave(&mchan->lock, irqflags); + list_splice_tail_init(&descs, &mchan->free); + mchan->allocated = true; + spin_unlock_irqrestore(&mchan->lock, irqflags); + return 1; +} + +static struct dma_async_tx_descriptor * +hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct hidma_chan *mchan = to_hidma_chan(dmach); + struct hidma_desc *mdesc = NULL; + struct hidma_dev *mdma = mchan->dmadev; + unsigned long irqflags; + + /* Get free descriptor */ + spin_lock_irqsave(&mchan->lock, irqflags); + if (!list_empty(&mchan->free)) { + mdesc = list_first_entry(&mchan->free, struct hidma_desc, node); + list_del(&mdesc->node); + } + spin_unlock_irqrestore(&mchan->lock, irqflags); + + if (!mdesc) + return NULL; + + hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, + src, dest, len, flags); + + /* Place descriptor in prepared list */ + spin_lock_irqsave(&mchan->lock, irqflags); + list_add_tail(&mdesc->node, &mchan->prepared); + spin_unlock_irqrestore(&mchan->lock, irqflags); + + return &mdesc->desc; +} + +static int hidma_terminate_channel(struct dma_chan *chan) +{ + struct hidma_chan *mchan = to_hidma_chan(chan); + struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device); + struct hidma_desc *tmp, *mdesc; + unsigned long irqflags; + LIST_HEAD(list); + int rc; + + pm_runtime_get_sync(dmadev->ddev.dev); + /* give completed requests a chance to finish */ + hidma_process_completed(mchan); + + spin_lock_irqsave(&mchan->lock, irqflags); + list_splice_init(&mchan->active, &list); + list_splice_init(&mchan->prepared, &list); + 
list_splice_init(&mchan->completed, &list); + spin_unlock_irqrestore(&mchan->lock, irqflags); + + /* this suspends the existing transfer */ + rc = hidma_ll_pause(dmadev->lldev); + if (rc) { + dev_err(dmadev->ddev.dev, "channel did not pause\n"); + goto out; + } + + /* return all user requests */ + list_for_each_entry_safe(mdesc, tmp, &list, node) { + struct dma_async_tx_descriptor *txd = &mdesc->desc; + dma_async_tx_callback callback = mdesc->desc.callback; + void *param = mdesc->desc.callback_param; + + dma_descriptor_unmap(txd); + + if (callback) + callback(param); + + dma_run_dependencies(txd); + + /* move myself to free_list */ + list_move(&mdesc->node, &mchan->free); + } + + rc = hidma_ll_resume(dmadev->lldev); +out: + pm_runtime_mark_last_busy(dmadev->ddev.dev); + pm_runtime_put_autosuspend(dmadev->ddev.dev); + return rc; +} + +static int hidma_terminate_all(struct dma_chan *chan) +{ + struct hidma_chan *mchan = to_hidma_chan(chan); + struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device); + int rc; + + rc = hidma_terminate_channel(chan); + if (rc) + return rc; + + /* reinitialize the hardware */ + pm_runtime_get_sync(dmadev->ddev.dev); + rc = hidma_ll_setup(dmadev->lldev); + pm_runtime_mark_last_busy(dmadev->ddev.dev); + pm_runtime_put_autosuspend(dmadev->ddev.dev); + return rc; +} + +static void hidma_free_chan_resources(struct dma_chan *dmach) +{ + struct hidma_chan *mchan = to_hidma_chan(dmach); + struct hidma_dev *mdma = mchan->dmadev; + struct hidma_desc *mdesc, *tmp; + unsigned long irqflags; + LIST_HEAD(descs); + + /* terminate running transactions and free descriptors */ + hidma_terminate_channel(dmach); + + spin_lock_irqsave(&mchan->lock, irqflags); + + /* Move data */ + list_splice_tail_init(&mchan->free, &descs); + + /* Free descriptors */ + list_for_each_entry_safe(mdesc, tmp, &descs, node) { + hidma_ll_free(mdma->lldev, mdesc->tre_ch); + list_del(&mdesc->node); + kfree(mdesc); + } + + mchan->allocated = 0; + spin_unlock_irqrestore(&mchan->lock, irqflags); +} + +static int hidma_pause(struct dma_chan *chan) +{ + struct hidma_chan *mchan; + struct hidma_dev *dmadev; + + mchan = to_hidma_chan(chan); + dmadev = to_hidma_dev(mchan->chan.device); + if (!mchan->paused) { + pm_runtime_get_sync(dmadev->ddev.dev); + if (hidma_ll_pause(dmadev->lldev)) + dev_warn(dmadev->ddev.dev, "channel did not stop\n"); + mchan->paused = true; + pm_runtime_mark_last_busy(dmadev->ddev.dev); + pm_runtime_put_autosuspend(dmadev->ddev.dev); + } + return 0; +} + +static int hidma_resume(struct dma_chan *chan) +{ + struct hidma_chan *mchan; + struct hidma_dev *dmadev; + int rc = 0; + + mchan = to_hidma_chan(chan); + dmadev = to_hidma_dev(mchan->chan.device); + if (mchan->paused) { + pm_runtime_get_sync(dmadev->ddev.dev); + rc = hidma_ll_resume(dmadev->lldev); + if (!rc) + mchan->paused = false; + else + dev_err(dmadev->ddev.dev, + "failed to resume the channel"); + pm_runtime_mark_last_busy(dmadev->ddev.dev); + pm_runtime_put_autosuspend(dmadev->ddev.dev); + } + return rc; +} + +static irqreturn_t hidma_chirq_handler(int chirq, void *arg) +{ + struct hidma_lldev *lldev = arg; + + /* + * All interrupts are request driven. + * HW doesn't send an interrupt by itself. 
+ */ + return hidma_ll_inthandler(chirq, lldev); +} + +static int hidma_probe(struct platform_device *pdev) +{ + struct hidma_dev *dmadev; + struct resource *trca_resource; + struct resource *evca_resource; + int chirq; + void __iomem *evca; + void __iomem *trca; + int rc; + + pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); + trca = devm_ioremap_resource(&pdev->dev, trca_resource); + if (IS_ERR(trca)) { + rc = -ENOMEM; + goto bailout; + } + + evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1); + evca = devm_ioremap_resource(&pdev->dev, evca_resource); + if (IS_ERR(evca)) { + rc = -ENOMEM; + goto bailout; + } + + /* + * This driver only handles the channel IRQs. + * Common IRQ is handled by the management driver. + */ + chirq = platform_get_irq(pdev, 0); + if (chirq < 0) { + rc = -ENODEV; + goto bailout; + } + + dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL); + if (!dmadev) { + rc = -ENOMEM; + goto bailout; + } + + INIT_LIST_HEAD(&dmadev->ddev.channels); + spin_lock_init(&dmadev->lock); + dmadev->ddev.dev = &pdev->dev; + pm_runtime_get_sync(dmadev->ddev.dev); + + dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask); + if (WARN_ON(!pdev->dev.dma_mask)) { + rc = -ENXIO; + goto dmafree; + } + + dmadev->dev_evca = evca; + dmadev->evca_resource = evca_resource; + dmadev->dev_trca = trca; + dmadev->trca_resource = trca_resource; + dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy; + dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources; + dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources; + dmadev->ddev.device_tx_status = hidma_tx_status; + dmadev->ddev.device_issue_pending = hidma_issue_pending; + dmadev->ddev.device_pause = hidma_pause; + dmadev->ddev.device_resume = hidma_resume; + dmadev->ddev.device_terminate_all = hidma_terminate_all; + dmadev->ddev.copy_align = 8; + + device_property_read_u32(&pdev->dev, "desc-count", + &dmadev->nr_descriptors); + + if (!dmadev->nr_descriptors && nr_desc_prm) + dmadev->nr_descriptors = nr_desc_prm; + + if (!dmadev->nr_descriptors) + dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC; + + dmadev->chidx = readl(dmadev->dev_trca + 0x28); + + /* Set DMA mask to 64 bits. 
*/ + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) { + dev_warn(&pdev->dev, "unable to set coherent mask to 64"); + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (rc) + goto dmafree; + } + + dmadev->lldev = hidma_ll_init(dmadev->ddev.dev, + dmadev->nr_descriptors, dmadev->dev_trca, + dmadev->dev_evca, dmadev->chidx); + if (!dmadev->lldev) { + rc = -EPROBE_DEFER; + goto dmafree; + } + + rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0, + "qcom-hidma", dmadev->lldev); + if (rc) + goto uninit; + + INIT_LIST_HEAD(&dmadev->ddev.channels); + rc = hidma_chan_init(dmadev, 0); + if (rc) + goto uninit; + + rc = dma_async_device_register(&dmadev->ddev); + if (rc) + goto uninit; + + dmadev->irq = chirq; + tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev); + dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n"); + platform_set_drvdata(pdev, dmadev); + pm_runtime_mark_last_busy(dmadev->ddev.dev); + pm_runtime_put_autosuspend(dmadev->ddev.dev); + return 0; + +uninit: + hidma_ll_uninit(dmadev->lldev); +dmafree: + if (dmadev) + hidma_free(dmadev); +bailout: + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return rc; +} + +static int hidma_remove(struct platform_device *pdev) +{ + struct hidma_dev *dmadev = platform_get_drvdata(pdev); + + pm_runtime_get_sync(dmadev->ddev.dev); + dma_async_device_unregister(&dmadev->ddev); + devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev); + hidma_ll_uninit(dmadev->lldev); + hidma_free(dmadev); + + dev_info(&pdev->dev, "HI-DMA engine removed\n"); + pm_runtime_put_sync_suspend(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +#if IS_ENABLED(CONFIG_ACPI) +static const struct acpi_device_id hidma_acpi_ids[] = { + {"QCOM8061"}, + {}, +}; +#endif + +static const struct of_device_id hidma_match[] = { + {.compatible = "qcom,hidma-1.0",}, + {}, +}; + +MODULE_DEVICE_TABLE(of, hidma_match); + +static struct platform_driver hidma_driver = { + .probe = hidma_probe, + .remove = hidma_remove, + .driver = { + .name = "hidma", + .of_match_table = hidma_match, + .acpi_match_table = ACPI_PTR(hidma_acpi_ids), + }, +}; + +module_platform_driver(hidma_driver); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h new file mode 100644 index 0000000..231e306 --- /dev/null +++ b/drivers/dma/qcom/hidma.h @@ -0,0 +1,160 @@ +/* + * Qualcomm Technologies HIDMA data structures + * + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+
+#ifndef QCOM_HIDMA_H
+#define QCOM_HIDMA_H
+
+#include <linux/kfifo.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+
+#define TRE_SIZE 32 /* each TRE is 32 bytes */
+#define TRE_CFG_IDX 0
+#define TRE_LEN_IDX 1
+#define TRE_SRC_LOW_IDX 2
+#define TRE_SRC_HI_IDX 3
+#define TRE_DEST_LOW_IDX 4
+#define TRE_DEST_HI_IDX 5
+
+struct hidma_tx_status {
+ u8 err_info; /* error record in this transfer */
+ u8 err_code; /* completion code */
+};
+
+struct hidma_tre {
+ atomic_t allocated; /* if this channel is allocated */
+ bool queued; /* flag whether this is pending */
+ u16 status; /* status */
+ u32 chidx; /* index of the tre */
+ u32 dma_sig; /* signature of the tre */
+ const char *dev_name; /* name of the device */
+ void (*callback)(void *data); /* requester callback */
+ void *data; /* Data associated with this channel*/
+ struct hidma_lldev *lldev; /* lldma device pointer */
+ u32 tre_local[TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy */
+ u32 tre_index; /* the offset where this was written*/
+ u32 int_flags; /* interrupt flags */
+};
+
+struct hidma_lldev {
+ bool initialized; /* initialized flag */
+ u8 trch_state; /* trch_state of the device */
+ u8 evch_state; /* evch_state of the device */
+ u8 chidx; /* channel index in the core */
+ u32 nr_tres; /* max number of configs */
+ spinlock_t lock; /* reentrancy */
+ struct hidma_tre *trepool; /* trepool of user configs */
+ struct device *dev; /* device */
+ void __iomem *trca; /* Transfer Channel address */
+ void __iomem *evca; /* Event Channel address */
+ struct hidma_tre
+ **pending_tre_list; /* Pointers to pending TREs */
+ struct hidma_tx_status
+ *tx_status_list; /* Pointers to pending TREs status*/
+ s32 pending_tre_count; /* Number of TREs pending */
+
+ void *tre_ring; /* TRE ring */
+ dma_addr_t tre_ring_handle; /* TRE ring to be shared with HW */
+ u32 tre_ring_size; /* Byte size of the ring */
+ u32 tre_processed_off; /* last processed TRE */
+
+ void *evre_ring; /* EVRE ring */
+ dma_addr_t evre_ring_handle; /* EVRE ring to be shared with HW */
+ u32 evre_ring_size; /* Byte size of the ring */
+ u32 evre_processed_off; /* last processed EVRE */
+
+ u32 tre_write_offset; /* TRE write location */
+ struct tasklet_struct task; /* task delivering notifications */
+ DECLARE_KFIFO_PTR(handoff_fifo,
+ struct hidma_tre *); /* pending TREs FIFO */
+};
+
+struct hidma_desc {
+ struct dma_async_tx_descriptor desc;
+ /* link list node for this channel*/
+ struct list_head node;
+ u32 tre_ch;
+};
+
+struct hidma_chan {
+ bool paused;
+ bool allocated;
+ char dbg_name[16];
+ u32 dma_sig;
+
+ /*
+ * active descriptor on this channel
+ * It is used by the DMA complete notification to
+ * locate the descriptor that initiated the transfer.
+ */ + struct dentry *debugfs; + struct dentry *stats; + struct hidma_dev *dmadev; + struct hidma_desc *running; + + struct dma_chan chan; + struct list_head free; + struct list_head prepared; + struct list_head active; + struct list_head completed; + + /* Lock for this structure */ + spinlock_t lock; +}; + +struct hidma_dev { + int irq; + int chidx; + u32 nr_descriptors; + + struct hidma_lldev *lldev; + void __iomem *dev_trca; + struct resource *trca_resource; + void __iomem *dev_evca; + struct resource *evca_resource; + + /* used to protect the pending channel list*/ + spinlock_t lock; + struct dma_device ddev; + + struct dentry *debugfs; + struct dentry *stats; + + /* Task delivering issue_pending */ + struct tasklet_struct task; +}; + +int hidma_ll_request(struct hidma_lldev *llhndl, u32 dev_id, + const char *dev_name, + void (*callback)(void *data), void *data, u32 *tre_ch); + +void hidma_ll_free(struct hidma_lldev *llhndl, u32 tre_ch); +enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch); +bool hidma_ll_isenabled(struct hidma_lldev *llhndl); +void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch); +void hidma_ll_start(struct hidma_lldev *llhndl); +int hidma_ll_pause(struct hidma_lldev *llhndl); +int hidma_ll_resume(struct hidma_lldev *llhndl); +void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch, + dma_addr_t src, dma_addr_t dest, u32 len, u32 flags); +int hidma_ll_setup(struct hidma_lldev *lldev); +struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels, + void __iomem *trca, void __iomem *evca, + u8 chidx); +int hidma_ll_uninit(struct hidma_lldev *llhndl); +irqreturn_t hidma_ll_inthandler(int irq, void *arg); +void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info, + u8 err_code); +#endif -- cgit v0.10.2
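For reference, a dmaengine client drives the DMA_MEMCPY capability that hidma_probe() registers through the standard client API: request a memcpy-capable channel, prepare a descriptor, submit it, then issue pending work and wait for the completion callback. The sketch below is illustrative only and not part of the commit above; the one-second timeout and the omission of dma_mapping_error() checks are simplifications, and only the dmaengine/DMA-mapping calls themselves are real kernel API.

/*
 * Illustrative sketch (not from the patch): a minimal dmaengine memcpy
 * client that can end up on a HIDMA channel.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/jiffies.h>

static void example_copy_done(void *arg)
{
	complete(arg);	/* invoked from the provider's completion path */
}

static int example_memcpy(void *dst, void *src, size_t len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_addr_t dst_dma, src_dma;
	dma_cap_mask_t mask;
	int ret = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* any memcpy-capable channel; a HIDMA channel qualifies */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* buffers should honor the channel's copy_align (8 bytes for HIDMA) */
	src_dma = dma_map_single(chan->device->dev, src, len, DMA_TO_DEVICE);
	dst_dma = dma_map_single(chan->device->dev, dst, len, DMA_FROM_DEVICE);

	/* resolves to hidma_prep_dma_memcpy() on a HIDMA channel */
	tx = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma, len,
						   DMA_PREP_INTERRUPT |
						   DMA_CTRL_ACK);
	if (!tx) {
		ret = -ENOMEM;
		goto unmap;
	}

	tx->callback = example_copy_done;
	tx->callback_param = &done;

	dmaengine_submit(tx);			/* hidma_tx_submit() */
	dma_async_issue_pending(chan);		/* hidma_issue_pending() */

	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(1000)))
		ret = -ETIMEDOUT;

unmap:
	dma_unmap_single(chan->device->dev, dst_dma, len, DMA_FROM_DEVICE);
	dma_unmap_single(chan->device->dev, src_dma, len, DMA_TO_DEVICE);
	dma_release_channel(chan);
	return ret;
}

On a HIDMA channel the callback fires out of hidma_callback() once the low-level driver reports the transfer complete, which is also where the per-descriptor runtime-PM reference is released.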
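The runtime power management around submission is the subtlest part of the flow above: hidma_tx_submit() takes a runtime-PM reference for each descriptor handed to the hardware, and hidma_callback() drops it with autosuspend, so the device may suspend only after it has been idle for the autosuspend delay configured in hidma_probe(). The fragment below is a generic illustration of that pattern, not code taken from the patch; the function names are hypothetical.

#include <linux/pm_runtime.h>

static int example_submit(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* resume HW, hold one reference */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
		return ret;
	}
	/* ... hand the request to the hardware ... */
	return 0;
}

static void example_complete(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop the reference taken at submit */
}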