diff options
author | Zhao Qiang <B45475@freescale.com> | 2014-03-24 10:01:13 (GMT) |
---|---|---|
committer | Jose Rivera <German.Rivera@freescale.com> | 2014-03-24 20:00:40 (GMT) |
commit | 34ef41dd5ab2376a9e47afbd8d09332866404bb3 (patch) | |
tree | 59a7e1e5da5a9588df74823862c5758eb03bde89 | |
parent | ccaed6d4c23e0072f37e97eba160df23734c9e05 (diff) | |
download | linux-fsl-qoriq-34ef41dd5ab2376a9e47afbd8d09332866404bb3.tar.xz |
drivers/net: support hdlc function for QE-UCC
This driver adds HDLC support for the Freescale QUICC Engine.
It supports NMSI and TSA modes.
Signed-off-by: Xie Xiaobo <r63061@freescale.com>
Signed-off-by: Zhao Qiang <B45475@freescale.com>
Change-Id: Iece969b4934241f0f1cb574c5014600ef63cfb95
Reviewed-on: http://git.am.freescale.net:8181/10113
Tested-by: Review Code-CDREVIEW <CDREVIEW@freescale.com>
Reviewed-by: Xiaobo Xie <X.Xie@freescale.com>
Reviewed-by: Jose Rivera <German.Rivera@freescale.com>
-rw-r--r-- | Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/ucc.txt | 3 | ||||
-rw-r--r-- | arch/powerpc/include/asm/ucc_fast.h | 1 | ||||
-rw-r--r-- | arch/powerpc/sysdev/qe_lib/Kconfig | 2 | ||||
-rw-r--r-- | arch/powerpc/sysdev/qe_lib/ucc_fast.c | 2 | ||||
-rw-r--r-- | drivers/net/wan/Kconfig | 10 | ||||
-rw-r--r-- | drivers/net/wan/Makefile | 1 | ||||
-rw-r--r-- | drivers/net/wan/fsl_ucc_hdlc.c | 1404 | ||||
-rw-r--r-- | drivers/net/wan/fsl_ucc_hdlc.h | 191 |
8 files changed, 1610 insertions, 4 deletions
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/ucc.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/ucc.txt index b1b569f..5e4a61b 100644 --- a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/ucc.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/ucc.txt @@ -4,7 +4,8 @@ Required properties: - device_type : should be "network", "hldc", "uart", "transparent" "bisync", "atm", "tdm" or "serial". - compatible : Describes the specific device attached to the UCC. - Examples include "ucc_geth", "fsl_atm", "ucc_uart" and "fsl,ucc-tdm". + Examples include "ucc_geth", "fsl_atm", "ucc_uart","fsl,ucc-tdm", + and "fsl,ucc_hdlc". - cell-index : the ucc number(1-8), corresponding to UCCx in UM. - reg : Offset and length of the register set for the device - interrupts : <a b> where a is the interrupt number and b is a diff --git a/arch/powerpc/include/asm/ucc_fast.h b/arch/powerpc/include/asm/ucc_fast.h index 74fdd31..561b00b 100644 --- a/arch/powerpc/include/asm/ucc_fast.h +++ b/arch/powerpc/include/asm/ucc_fast.h @@ -34,6 +34,7 @@ #define T_W 0x20000000 /* wrap bit */ #define T_I 0x10000000 /* interrupt on completion */ #define T_L 0x08000000 /* last */ +#define T_TC 0x04000000 /* crc */ #define T_CM 0x02000000 /* CM */ /* Rx Data buffer must be 4 bytes aligned in most cases */ diff --git a/arch/powerpc/sysdev/qe_lib/Kconfig b/arch/powerpc/sysdev/qe_lib/Kconfig index 56e1228..644b7dbf 100644 --- a/arch/powerpc/sysdev/qe_lib/Kconfig +++ b/arch/powerpc/sysdev/qe_lib/Kconfig @@ -11,7 +11,7 @@ config UCC_SLOW config UCC_FAST bool - default y if UCC_GETH || FSL_UCC_TDM + default y if UCC_GETH || FSL_UCC_TDM || FSL_UCC_HDLC help This option provides qe_lib support to UCC fast protocols: HDLC, Ethernet, ATM, transparent diff --git a/arch/powerpc/sysdev/qe_lib/ucc_fast.c b/arch/powerpc/sysdev/qe_lib/ucc_fast.c index d72630e..67191a0 100644 --- a/arch/powerpc/sysdev/qe_lib/ucc_fast.c +++ b/arch/powerpc/sysdev/qe_lib/ucc_fast.c @@ 
-328,7 +328,6 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc ucc_fast_free(uccf); return -EINVAL; } -#ifdef CONFIG_FSL_UCC_TDM } else { /* tdm Rx clock routing */ if ((uf_info->rx_clock != QE_CLK_NONE) && @@ -365,7 +364,6 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc ucc_fast_free(uccf); return -EINVAL; } -#endif } /* Set interrupt mask register at UCC level. */ diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 94e2349..0288bdb 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -280,6 +280,16 @@ config DSCC4 To compile this driver as a module, choose M here: the module will be called dscc4. +config FSL_UCC_HDLC + tristate "freescale QUICC Engine HDLC support" + depends on HDLC + help + Driver for freescale QUICC Engine HDLC controller. The driver + support HDLC run on NMSI and TDM mode. + + To compile this driver as a module, choose M here: the + module will be called fsl_ucc_hdlc. + config DSCC4_PCISYNC bool "Etinc PCISYNC features" depends on DSCC4 diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index c135ef4..25fec40 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_WANXL) += wanxl.o obj-$(CONFIG_PCI200SYN) += pci200syn.o obj-$(CONFIG_PC300TOO) += pc300too.o obj-$(CONFIG_IXP4XX_HSS) += ixp4xx_hss.o +obj-$(CONFIG_FSL_UCC_HDLC) += fsl_ucc_hdlc.o clean-files := wanxlfw.inc $(obj)/wanxl.o: $(obj)/wanxlfw.inc diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c new file mode 100644 index 0000000..0c8cac6 --- /dev/null +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -0,0 +1,1404 @@ +/* Freescale QUICC Engine HDLC Device Driver + * + * Copyright 2014 Freescale Semiconductor Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/stddef.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/hdlc.h> +#include <linux/io.h> +#include <linux/tdm.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/dma-mapping.h> +#include <linux/spinlock.h> +#include <linux/delay.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <sysdev/fsl_soc.h> +#include <linux/slab.h> +#include "fsl_ucc_hdlc.h" + +#define DRV_DESC "Freescale QE UCC HDLC Driver" +#define DRV_NAME "ucc_hdlc" + +#undef DEBUG + +static struct ucc_hdlc_info uhdlc_primary_info = { + .uf_info = { + .tsa = 0, + .cdp = 0, + .cds = 1, + .ctsp = 0, + .ctss = 1, + .revd = 0, + .urfs = 256, + .utfs = 256, + .urfet = 128, + .urfset = 192, + .utfet = 128, + .utftt = 0x40, + .ufpt = 256, + .mode = UCC_FAST_PROTOCOL_MODE_HDLC, + .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL, + .tenc = UCC_FAST_TX_ENCODING_NRZ, + .renc = UCC_FAST_RX_ENCODING_NRZ, + .tcrc = UCC_FAST_16_BIT_CRC, + .synl = UCC_FAST_SYNC_LEN_NOT_USED, + }, + + .si_info = { + .simr_rfsd = 1, /* TDM card need 1 bit delay */ + .simr_tfsd = 0, + .simr_crt = 0, + .simr_sl = 0, + .simr_ce = 1, + .simr_fe = 1, + .simr_gm = 0, + }, +}; + +static struct ucc_hdlc_info uhdlc_info[MAX_HDLC_NUM]; +static int siram_init_flag; + + +#ifdef DEBUG +static void dump_siram(struct ucc_hdlc_private *priv) +{ + int i; + u16 *siram = priv->siram; + + dev_info(priv->dev, "Dump the SI RX RAM\n"); + for (i = 0; i < priv->num_of_ts; i++) { + pr_info("%04x ", siram[priv->siram_entry_id * 32 + i]); + if ((i 
+ 1) % 4) + pr_info("\n"); + } + + dev_info(priv->dev, "Dump the SI TX RAM\n"); + for (i = 0; i < priv->num_of_ts; i++) { + pr_info("%04x ", siram[priv->siram_entry_id * 32 + 0x200 + i]); + if ((i + 1) % 4) + pr_info("\n"); + } +} + +static void mem_disp(u8 *addr, int size) +{ + void *i; + int size16_aling = (size >> 4) << 4; + int size4_aling = (size >> 2) << 2; + int not_align = 0; + if (size % 16) + not_align = 1; + + for (i = addr; i < addr + size16_aling; i += 16) { + u32 *i32 = i; + + pr_info("0x%08p: %08x %08x %08x %08x\r\n", + i32, i32[0], i32[1], i32[2], i32[3]); + } + + if (not_align == 1) + pr_info("0x%08p: ", i); + for (; i < addr + size4_aling; i += 4) + pr_info("%08x ", *((u32 *) (i))); + for (; i < addr + size; i++) + pr_info("%02x", *((u8 *) (i))); + if (not_align == 1) + pr_info("\r\n"); +} + +static void dump_ucc(struct ucc_hdlc_private *priv) +{ + struct ucc_hdlc_param *ucc_pram; + ucc_pram = priv->ucc_pram; + + dev_info(priv->dev, "DumpiniCC %d Registers\n", + priv->uh_info->uf_info.ucc_num); + ucc_fast_dump_regs(priv->uccf); + dev_info(priv->dev, "Dumping UCC %d Parameter RAM\n", + priv->uh_info->uf_info.ucc_num); + dev_info(priv->dev, "rbase = 0x%x\n", in_be32(&ucc_pram->rbase)); + dev_info(priv->dev, "rbptr = 0x%x\n", in_be32(&ucc_pram->rbptr)); + dev_info(priv->dev, "mrblr = 0x%x\n", in_be16(&ucc_pram->mrblr)); + dev_info(priv->dev, "rbdlen = 0x%x\n", in_be16(&ucc_pram->rbdlen)); + dev_info(priv->dev, "rbdstat = 0x%x\n", in_be16(&ucc_pram->rbdstat)); + dev_info(priv->dev, "rstate = 0x%x\n", in_be32(&ucc_pram->rstate)); + dev_info(priv->dev, "rdptr = 0x%x\n", in_be32(&ucc_pram->rdptr)); + dev_info(priv->dev, "riptr = 0x%x\n", in_be16(&ucc_pram->riptr)); + dev_info(priv->dev, "tbase = 0x%x\n", in_be32(&ucc_pram->tbase)); + dev_info(priv->dev, "tbptr = 0x%x\n", in_be32(&ucc_pram->tbptr)); + dev_info(priv->dev, "tbdlen = 0x%x\n", in_be16(&ucc_pram->tbdlen)); + dev_info(priv->dev, "tbdstat = 0x%x\n", in_be16(&ucc_pram->tbdstat)); + 
dev_info(priv->dev, "tstate = 0x%x\n", in_be32(&ucc_pram->tstate)); + dev_info(priv->dev, "tdptr = 0x%x\n", in_be32(&ucc_pram->tdptr)); + dev_info(priv->dev, "tiptr = 0x%x\n", in_be16(&ucc_pram->tiptr)); + dev_info(priv->dev, "rcrc = 0x%x\n", in_be32(&ucc_pram->rcrc)); + dev_info(priv->dev, "tcrc = 0x%x\n", in_be32(&ucc_pram->tcrc)); + dev_info(priv->dev, "c_mask = 0x%x\n", in_be32(&ucc_pram->c_mask)); + dev_info(priv->dev, "c_pers = 0x%x\n", in_be32(&ucc_pram->c_pres)); + dev_info(priv->dev, "disfc = 0x%x\n", in_be16(&ucc_pram->disfc)); + dev_info(priv->dev, "crcec = 0x%x\n", in_be16(&ucc_pram->crcec)); +} + +static void dump_bds(struct ucc_hdlc_private *priv) +{ + int length; + + if (priv->tx_bd_base) { + length = sizeof(struct qe_bd) * NUM_OF_BUF; + dev_info(priv->dev, " Dump tx BDs\n"); + mem_disp((u8 *)priv->tx_bd_base, length); + } + + if (priv->rx_bd_base) { + length = sizeof(struct qe_bd) * NUM_OF_BUF; + dev_info(priv->dev, " Dump rx BDs\n"); + mem_disp((u8 *)priv->rx_bd_base, length); + } + +} + +static void dump_priv(struct ucc_hdlc_private *priv) +{ + dev_info(priv->dev, "uh_info = 0x%x\n", (u32)priv->uh_info); + dev_info(priv->dev, "uccf = 0x%x\n", (u32)priv->uccf); + dev_info(priv->dev, "uf_regs = 0x%x\n", (u32)priv->uf_regs); + dev_info(priv->dev, "si_regs = 0x%x\n", (u32)priv->si_regs); + dev_info(priv->dev, "ucc_pram = 0x%x\n", (u32)priv->ucc_pram); + dev_info(priv->dev, "tdm_port = 0x%x\n", (u32)priv->tdm_port); + dev_info(priv->dev, "siram_entry_id = 0x%x\n", priv->siram_entry_id); + dev_info(priv->dev, "siram = 0x%x\n", (u32)priv->siram); + dev_info(priv->dev, "tdm_mode = 0x%x\n", (u32)priv->tdm_mode); + dev_info(priv->dev, "tdm_framer_type; = 0x%x\n", + (u32)priv->tdm_framer_type); + dev_info(priv->dev, "rx_buffer; = 0x%x\n", (u32)priv->rx_buffer); + dev_info(priv->dev, "tx_buffer; = 0x%x\n", (u32)priv->tx_buffer); + dev_info(priv->dev, "dma_rx_addr; = 0x%x\n", (u32)priv->dma_rx_addr); + dev_info(priv->dev, "tx_bd; = 0x%x\n", 
(u32)priv->tx_bd_base); + dev_info(priv->dev, "rx_bd; = 0x%x\n", (u32)priv->rx_bd_base); + dev_info(priv->dev, "phase_rx = 0x%x\n", (u32)priv->phase_rx); + dev_info(priv->dev, "phase_tx = 0x%x\n", (u32)priv->phase_tx); + dev_info(priv->dev, "ucc_pram_offset = 0x%x\n", priv->ucc_pram_offset); + +} + +#endif /* DEBUG */ + +static void init_si(struct ucc_hdlc_private *priv) +{ + struct si1 __iomem *si_regs; + u16 __iomem *siram; + u16 siram_entry_valid; + u16 siram_entry_closed; + u16 ucc_num; + u8 csel; + u16 sixmr; + u16 tdm_port; + u32 siram_entry_id; + u32 mask; + int i; + + si_regs = priv->si_regs; + siram = priv->siram; + ucc_num = priv->uh_info->uf_info.ucc_num; + tdm_port = priv->tdm_port; + siram_entry_id = priv->siram_entry_id; + + if (priv->tdm_framer_type == TDM_FRAMER_T1) + priv->num_of_ts = 24; + if (priv->tdm_framer_type == TDM_FRAMER_E1) + priv->num_of_ts = 32; + + /* set siram table */ + csel = (ucc_num < 4) ? ucc_num + 9 : ucc_num - 3; + + siram_entry_valid = SIR_CSEL(csel) | SIR_BYTE | SIR_CNT(0); + siram_entry_closed = SIR_IDLE | SIR_BYTE | SIR_CNT(0); + + for (i = 0; i < priv->num_of_ts; i++) { + mask = 0x01 << i; + + if (priv->tx_ts_mask & mask) + out_be16(&siram[siram_entry_id * 32 + i], + siram_entry_valid); + else + out_be16(&siram[siram_entry_id * 32 + i], + siram_entry_closed); + + if (priv->rx_ts_mask & mask) + out_be16(&siram[siram_entry_id * 32 + 0x200 + i], + siram_entry_valid); + else + out_be16(&siram[siram_entry_id * 32 + 0x200 + i], + siram_entry_closed); + } + + setbits16(&siram[(siram_entry_id * 32) + (priv->num_of_ts - 1)], + SIR_LAST); + setbits16(&siram[(siram_entry_id * 32) + 0x200 + (priv->num_of_ts - 1)], + SIR_LAST); + + /* Set SIxMR register */ + sixmr = SIMR_SAD(siram_entry_id); + + sixmr &= ~SIMR_SDM_MASK; + + if (priv->tdm_mode == TDM_INTERNAL_LOOPBACK) + sixmr |= SIMR_SDM_INTERNAL_LOOPBACK; + else + sixmr |= SIMR_SDM_NORMAL; + + sixmr |= SIMR_RFSD(priv->uh_info->si_info.simr_rfsd) | + 
SIMR_TFSD(priv->uh_info->si_info.simr_tfsd); + + if (priv->uh_info->si_info.simr_crt) + sixmr |= SIMR_CRT; + if (priv->uh_info->si_info.simr_sl) + sixmr |= SIMR_SL; + if (priv->uh_info->si_info.simr_ce) + sixmr |= SIMR_CE; + if (priv->uh_info->si_info.simr_fe) + sixmr |= SIMR_FE; + if (priv->uh_info->si_info.simr_gm) + sixmr |= SIMR_GM; + + switch (tdm_port) { + case 0: + out_be16(&si_regs->sixmr1[0], sixmr); + break; + case 1: + out_be16(&si_regs->sixmr1[1], sixmr); + break; + case 2: + out_be16(&si_regs->sixmr1[2], sixmr); + break; + case 3: + out_be16(&si_regs->sixmr1[3], sixmr); + break; + default: + dev_err(priv->dev, "can not find tdm sixmr reg\n"); + break; + } + +#ifdef DEBUG + dump_siram(priv); +#endif + +} +static int uhdlc_init(struct ucc_hdlc_private *priv) +{ + struct ucc_hdlc_info *uh_info; + struct ucc_fast_info *uf_info; + u32 cecr_subblock; + u32 bd_status; + int ret, i; + void *bd_buffer; + dma_addr_t bd_dma_addr; + u32 riptr; + u32 tiptr; + u32 gumr; + + uh_info = priv->uh_info; + uf_info = &uh_info->uf_info; + + if (priv->tsa) { + uf_info->tsa = 1; + uf_info->ctsp = 1; + } + uf_info->uccm_mask = (u32)((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF | + UCC_HDLC_UCCE_TXB) << 16); + + if (ucc_fast_init(uf_info, &priv->uccf)) { + dev_err(priv->dev, "Failed to init uccf."); + return -ENOMEM; + } + + priv->uf_regs = priv->uccf->uf_regs; + ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); + + /* Loopback mode */ + if (priv->loopback) { + gumr = in_be32(&priv->uf_regs->gumr); + gumr |= (0x40000000 | UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_TCI); + gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN); + out_be32(&priv->uf_regs->gumr, gumr); + } + + /* Initialize SI */ + if (priv->tsa) + init_si(priv); + + /* Write to QE CECR, UCCx channel to Stop Transmission */ + cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); + ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock, + (u8) QE_CR_PROTOCOL_UNSPECIFIED, 0); + + /* Set UPSMR normal mode (need fixed)*/ + 
out_be32(&priv->uf_regs->upsmr, 0); + + priv->rx_ring_size = RX_BD_RING_LEN; + priv->tx_ring_size = TX_BD_RING_LEN; + /* Alloc Rx BD */ + priv->rx_bd_base = dma_alloc_coherent(priv->dev, + RX_BD_RING_LEN * sizeof(struct qe_bd), + &priv->dma_rx_bd, GFP_KERNEL); + + if (IS_ERR_VALUE((unsigned long)priv->rx_bd_base)) { + dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n"); + ret = -ENOMEM; + goto rxbd_alloc_error; + } + + /* Alloc Tx BD */ + priv->tx_bd_base = dma_alloc_coherent(priv->dev, + TX_BD_RING_LEN * sizeof(struct qe_bd), + &priv->dma_tx_bd, GFP_KERNEL); + + if (IS_ERR_VALUE((unsigned long)priv->tx_bd_base)) { + dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n"); + ret = -ENOMEM; + goto txbd_alloc_error; + } + + /* Alloc parameter ram for ucc hdlc */ + priv->ucc_pram_offset = qe_muram_alloc(sizeof(priv->ucc_pram), + ALIGNMENT_OF_UCC_HDLC_PRAM); + + if (IS_ERR_VALUE(priv->ucc_pram_offset)) { + dev_err(priv->dev, "Can not allocate MURAM for hdlc prameter.\n"); + ret = -ENOMEM; + goto pram_alloc_error; + } + + priv->tx_skbuff = kmalloc_array(priv->tx_ring_size, + sizeof(*priv->tx_skbuff), GFP_KERNEL); + if (!priv->tx_skbuff) + goto pram_alloc_error; + for (i = 0; i < priv->tx_ring_size; i++) + priv->tx_skbuff[i] = NULL; + + priv->skb_curtx = 0; + priv->skb_dirtytx = 0; + priv->curtx_bd = priv->tx_bd_base; + priv->dirty_tx = priv->tx_bd_base; + priv->currx_bd = priv->rx_bd_base; + priv->currx_bdnum = 0; + + /* init parameter base */ + cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); + ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock, + QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset); + + priv->ucc_pram = (struct ucc_hdlc_param __iomem *) + qe_muram_addr(priv->ucc_pram_offset); + + /* Zero out parameter ram */ + memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param)); + + /* Alloc riptr, tiptr */ + riptr = qe_muram_alloc(32, 32); + if (IS_ERR_VALUE(riptr)) { + dev_err(priv->dev, "Cannot allocate MURAM mem for 
Receive internal temp data pointer\n"); + ret = -ENOMEM; + goto riptr_alloc_error; + } + + tiptr = qe_muram_alloc(32, 32); + if (IS_ERR_VALUE(tiptr)) { + dev_err(priv->dev, "Cannot allocate MURAM mem for transmit internal temp data pointer\n"); + ret = -ENOMEM; + goto tiptr_alloc_error; + } + + /* Set RIPTR, TIPTR */ + out_be16(&priv->ucc_pram->riptr, (u16)riptr); + out_be16(&priv->ucc_pram->tiptr, (u16)tiptr); + + /* Set MRBLR */ + out_be16(&priv->ucc_pram->mrblr, (u16)MAX_RX_BUF_LENGTH); + + /* Set RBASE, TBASE */ + out_be32(&priv->ucc_pram->rbase, (u32)priv->dma_rx_bd); + out_be32(&priv->ucc_pram->tbase, (u32)priv->dma_tx_bd); + + /* Set RSTATE, TSTATE */ + out_be32(&priv->ucc_pram->rstate, 0x30000000); + out_be32(&priv->ucc_pram->tstate, 0x30000000); + + /* Set C_MASK, C_PRES for 16bit CRC */ + out_be32(&priv->ucc_pram->c_mask, 0x0000F0B8); + out_be32(&priv->ucc_pram->c_pres, 0x0000FFFF); + + out_be16(&priv->ucc_pram->mflr, MAX_RX_BUF_LENGTH + 8); + out_be16(&priv->ucc_pram->rfthr, 1); + out_be16(&priv->ucc_pram->rfcnt, 1); + out_be16(&priv->ucc_pram->hmask, DEFAULT_ADDR_MASK); + out_be16(&priv->ucc_pram->haddr2, DEFAULT_BROAD_ADDR); + out_be16(&priv->ucc_pram->haddr1, DEFAULT_HDLC_ADDR); + out_be16(&priv->ucc_pram->haddr3, DEFAULT_HDLC_ADDR); + out_be16(&priv->ucc_pram->haddr4, DEFAULT_HDLC_ADDR); + + /* Get BD buffer */ + bd_buffer = dma_alloc_coherent(priv->dev, + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH, + &bd_dma_addr, GFP_KERNEL); + + if (!bd_buffer) { + dev_err(priv->dev, "Could not allocate buffer descriptors\n"); + return -ENOMEM; + } + + memset(bd_buffer, 0, RX_BD_RING_LEN * MAX_RX_BUF_LENGTH); + + priv->rx_buffer = bd_buffer; + + priv->dma_rx_addr = bd_dma_addr; + + for (i = 0; i < RX_BD_RING_LEN; i++) { + if (i < (RX_BD_RING_LEN - 1)) + bd_status = R_E | R_I; + else + bd_status = R_E | R_I | R_W; + + out_be32((u32 *)(priv->rx_bd_base + i), bd_status); + out_be32(&priv->rx_bd_base[i].buf, priv->dma_rx_addr + + i * MAX_RX_BUF_LENGTH); + } + + for (i = 0; i 
< TX_BD_RING_LEN; i++) { + if (i < (TX_BD_RING_LEN - 1)) + bd_status = T_I | T_TC; + else + bd_status = T_I | T_TC | T_W; + + out_be32((u32 *)(priv->tx_bd_base + i), bd_status); + } + + return 0; + +tiptr_alloc_error: + qe_muram_free(riptr); +riptr_alloc_error: + qe_muram_free(priv->ucc_pram_offset); +pram_alloc_error: + dma_free_coherent(priv->dev, + TX_BD_RING_LEN * sizeof(struct qe_bd), + priv->tx_bd_base, priv->dma_tx_bd); +txbd_alloc_error: + dma_free_coherent(priv->dev, + RX_BD_RING_LEN * sizeof(struct qe_bd), + priv->rx_bd_base, priv->dma_rx_bd); +rxbd_alloc_error: + ucc_fast_free(priv->uccf); + + return ret; +} + +static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv; + struct qe_bd __iomem *bd; + u32 bd_status; + u8 *send_buf; + int i; + u32 *hdlc_head, tmp_head; + + if (skb_headroom(skb) < HDLC_HEAD_LEN) { + dev->stats.tx_dropped++; + dev_kfree_skb(skb); + netdev_err(dev, "No enough space for hdlc head\n"); + return -ENOMEM; + } + skb_push(skb, HDLC_HEAD_LEN); + + hdlc_head = (u32 *)skb->data; + tmp_head = *hdlc_head; + tmp_head = (tmp_head & HDLC_HEAD_MASK) | DEFAULT_HDLC_HEAD; + *hdlc_head = tmp_head; + + dev->stats.tx_bytes += skb->len; + + send_buf = (u8 *)skb->data; + + if (priv->loopback) { + pr_info("\nTransmitted data:\n"); + for (i = 0; (i < 16); i++) + pr_info("%x ", send_buf[i]); + } + + /* Start from the next BD that should be filled */ + bd = priv->curtx_bd; + bd_status = in_be32((u32 __iomem *)bd); + /* Save the skb pointer so we can free it later */ + priv->tx_skbuff[priv->skb_curtx] = skb; + + + /* Update the current skb pointer (wrapping if this was the last) */ + priv->skb_curtx = + (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN); + /* set up the buffer descriptor */ + out_be32(&((struct qe_bd __iomem *)bd)->buf, + dma_map_single(priv->dev, skb->data, + skb->len, DMA_TO_DEVICE)); + + bd_status 
= (bd_status & T_W) | T_R | T_I | T_L | T_TC | skb->len; + + /* set bd status and length */ + out_be32((u32 __iomem *)bd, bd_status); + + /* Move to next BD in the ring */ + if (!(bd_status & T_W)) + bd += 1; + else + bd = priv->tx_bd_base; + priv->curtx_bd = bd; + + return NETDEV_TX_OK; +} + +static int hdlc_tx_done(struct ucc_hdlc_private *priv) +{ + /* Start from the next BD that should be filled */ + struct net_device *dev = priv->ndev; + struct qe_bd *bd; /* BD pointer */ + u32 bd_status; + + bd = priv->dirty_tx; + bd_status = in_be32((u32 __iomem *)bd); + + /* Normal processing. */ + while ((bd_status & T_R) == 0) { + struct sk_buff *skb; + + /* BD contains already transmitted buffer. */ + /* Handle the transmitted buffer and release */ + /* the BD to be used with the current frame */ + + skb = priv->tx_skbuff[priv->skb_dirtytx]; + if (!skb) + break; + + dev->stats.tx_packets++; + dma_unmap_single(priv->dev, + in_be32(&((struct qe_bd __iomem *)bd)->buf), + skb->len, DMA_TO_DEVICE); + dev_kfree_skb_irq(skb); + + priv->tx_skbuff[priv->skb_dirtytx] = NULL; + priv->skb_dirtytx = + (priv->skb_dirtytx + + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN); + + /* We freed a buffer, so now we can restart transmission */ + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + + /* Advance the confirmation BD pointer */ + if (!(bd_status & T_W)) + bd += 1; + else + bd = priv->tx_bd_base; + bd_status = in_be32((u32 __iomem *)bd); + } + priv->dirty_tx = bd; + + return 0; +} + +static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit) +{ + struct net_device *dev = priv->ndev; + struct sk_buff *skb; + hdlc_device *hdlc = dev_to_hdlc(dev); + struct qe_bd *bd; + u32 bd_status; + u16 length, howmany = 0; + u8 *bdbuffer; + int i; + + bd = priv->currx_bd; + bd_status = in_be32((u32 __iomem *)bd); + + /* while there are received buffers and BD is full (~R_E) */ + while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { + bdbuffer = priv->rx_buffer + + (priv->currx_bdnum 
* MAX_RX_BUF_LENGTH); + length = (u16) (bd_status & BD_LENGTH_MASK); + + if (priv->loopback) { + pr_info("\nReceived data:\n"); + for (i = 0; (i < 16); i++) + pr_info("%x ", bdbuffer[i]); + } + + bdbuffer += HDLC_HEAD_LEN; + length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE); + skb = dev_alloc_skb(length); + if (!skb) { + dev->stats.rx_dropped++; + return -ENOMEM; + } + + skb_put(skb, length); + skb->len = length; + skb->dev = dev; + memcpy(skb->data, bdbuffer, length); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + howmany++; + if (hdlc->proto) + skb->protocol = hdlc_type_trans(skb, dev); + else + skb->protocol = cpu_to_be16(ETH_P_IP); + netif_rx(skb); + + /* update to point at the next bd */ + if (bd_status & R_W) + bd = priv->rx_bd_base; + else + bd += 1; + + if (priv->currx_bdnum < (RX_BD_RING_LEN - 1)) + priv->currx_bdnum += 1; + else + priv->currx_bdnum = RX_BD_RING_LEN - 1; + + bd_status = in_be32((u32 __iomem *)bd); + } + + priv->currx_bd = bd; + return howmany; +} + +static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id) +{ + struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id; + struct ucc_fast_private *uccf; + struct ucc_hdlc_info *uh_info; + u32 ucce; + u32 uccm; + + uh_info = priv->uh_info; + uccf = priv->uccf; + + ucce = in_be32(uccf->p_ucce); + uccm = in_be32(uccf->p_uccm); + + if ((ucce >> 16) & (UCC_HDLC_UCCE_RXF | UCC_HDLC_UCCE_RXB)) + hdlc_rx_done(priv, RX_CLEAN_MAX); + + if ((ucce >> 16) & UCC_HDLC_UCCE_TXB) + hdlc_tx_done(priv); + + out_be32(uccf->p_ucce, ucce); + + return IRQ_HANDLED; +} + +static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + const size_t size = sizeof(te1_settings); + te1_settings line; + struct ucc_hdlc_private *priv = netdev_priv(dev); + + if (cmd != SIOCWANDEV) + return hdlc_ioctl(dev, ifr, cmd); + + switch (ifr->ifr_settings.type) { + case IF_GET_IFACE: + ifr->ifr_settings.type = IF_IFACE_E1; + if (ifr->ifr_settings.size < size) { + ifr->ifr_settings.size = size; 
/* data size wanted */ + return -ENOBUFS; + } + line.clock_type = priv->clocking; + line.clock_rate = 0; + line.loopback = 0; + + if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size)) + return -EFAULT; + return 0; + + default: + return hdlc_ioctl(dev, ifr, cmd); + } +} + +static int uhdlc_open(struct net_device *dev) +{ + u32 cecr_subblock; + struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv; + + if (priv->hdlc_busy != 1) { + if (request_irq(priv->uh_info->uf_info.irq, + ucc_hdlc_irq_handler, 0, + "hdlc", (void *)priv)) { + dev_err(priv->dev, "request_irq for ucc hdlc failed\n"); + return -ENODEV; + } + cecr_subblock = ucc_fast_get_qe_cr_subblock( + priv->uh_info->uf_info.ucc_num); + + qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock, + (u8) QE_CR_PROTOCOL_UNSPECIFIED, 0); + + ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); + + /* Enable the TDM port */ + if (priv->tsa) + priv->si_regs->siglmr1_h |= (0x1 << priv->tdm_port); + + priv->hdlc_busy = 1; + } else + dev_err(priv->dev, "HDLC IS RUNNING!\n"); + +#ifdef DEBUG + dump_priv(priv); + dump_ucc(priv); + dump_bds(priv); +#endif + + return 0; +} + +static void uhdlc_memclean(struct ucc_hdlc_private *priv) +{ + qe_muram_free(priv->ucc_pram->riptr); + qe_muram_free(priv->ucc_pram->tiptr); + + if (priv->rx_bd_base) { + dma_free_coherent(priv->dev, + RX_BD_RING_LEN * sizeof(struct qe_bd), + priv->rx_bd_base, priv->dma_rx_bd); + + priv->rx_bd_base = NULL; + priv->dma_rx_bd = 0; + } + + if (priv->tx_bd_base) { + dma_free_coherent(priv->dev, + TX_BD_RING_LEN * sizeof(struct qe_bd), + priv->tx_bd_base, priv->dma_tx_bd); + + priv->tx_bd_base = NULL; + priv->dma_tx_bd = 0; + } + + if (priv->ucc_pram) { + qe_muram_free(priv->ucc_pram_offset); + priv->ucc_pram = NULL; + priv->ucc_pram_offset = 0; + } + + if (priv->uf_regs) { + iounmap(priv->uf_regs); + priv->uf_regs = NULL; + } + + if (priv->uccf) { + ucc_fast_free(priv->uccf); + priv->uccf = NULL; + } + + if (priv->rx_buffer) { + dma_free_coherent(priv->dev, + 
2 * NUM_OF_BUF * MAX_RX_BUF_LENGTH, + priv->rx_buffer, priv->dma_rx_addr); + priv->rx_buffer = NULL; + priv->dma_rx_addr = 0; + } +} + +static int uhdlc_close(struct net_device *dev) +{ + struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv; + u32 cecr_subblock; + + cecr_subblock = ucc_fast_get_qe_cr_subblock( + priv->uh_info->uf_info.ucc_num); + + qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, + (u8) QE_CR_PROTOCOL_UNSPECIFIED, 0); + qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock, + (u8) QE_CR_PROTOCOL_UNSPECIFIED, 0); + + if (priv->tsa) + priv->si_regs->siglmr1_h &= ~(0x1 << priv->tdm_port); + + ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); + + free_irq(priv->uh_info->uf_info.irq, priv); + priv->hdlc_busy = 0; + + return 0; +} + +static enum tdm_mode_t set_tdm_mode(const char *tdm_mode_type) +{ + if (strcasecmp(tdm_mode_type, "internal-loopback") == 0) + return TDM_INTERNAL_LOOPBACK; + else + return TDM_NORMAL; +} + + +static enum tdm_framer_t set_tdm_framer(const char *tdm_framer_type) +{ + if (strcasecmp(tdm_framer_type, "e1") == 0) + return TDM_FRAMER_E1; + else + return TDM_FRAMER_T1; +} + +static void set_si_param(struct ucc_hdlc_private *priv) +{ + struct si_mode_info *si_info = &priv->uh_info->si_info; + + if (priv->tdm_mode == TDM_INTERNAL_LOOPBACK) { + si_info->simr_crt = 1; + si_info->simr_rfsd = 0; + } +} + +static int of_parse_hdlc_tdm(struct device_node *np, + struct ucc_hdlc_private *priv, struct ucc_hdlc_info *uh_info) +{ + const unsigned int *prop; + const char *sprop; + int ret = 0; + + sprop = of_get_property(np, "fsl,rx-sync-clock", NULL); + if (sprop) { + uh_info->uf_info.rx_sync = qe_clock_source(sprop); + if ((uh_info->uf_info.rx_sync < QE_CLK_NONE) || + (uh_info->uf_info.rx_sync > QE_RSYNC_PIN)) { + dev_err(priv->dev, "Invalid rx-sync-clock property\n"); + return -EINVAL; + } + } else { + dev_err(priv->dev, "Invalid rx-sync-clock property\n"); + return -EINVAL; + } + + sprop = of_get_property(np, "fsl,tx-sync-clock", NULL); + if 
(sprop) { + uh_info->uf_info.tx_sync = qe_clock_source(sprop); + if ((uh_info->uf_info.tx_sync < QE_CLK_NONE) || + (uh_info->uf_info.tx_sync > QE_TSYNC_PIN)) { + dev_err(priv->dev, "Invalid tx-sync-clock property\n"); + return -EINVAL; + } + } else { + dev_err(priv->dev, "Invalid tx-sync-clock property\n"); + return -EINVAL; + } + + prop = of_get_property(np, "fsl,tx-timeslot", NULL); + if (!prop) { + ret = -EINVAL; + dev_err(priv->dev, "Invalid tx-timeslot property\n"); + return ret; + } + priv->tx_ts_mask = *prop; + + prop = of_get_property(np, "fsl,rx-timeslot", NULL); + if (!prop) { + ret = -EINVAL; + dev_err(priv->dev, "Invalid rx-timeslot property\n"); + return ret; + } + priv->rx_ts_mask = *prop; + + prop = of_get_property(np, "fsl,tdm-id", NULL); + if (!prop) { + ret = -EINVAL; + dev_err(priv->dev, "No fsl,tdm-id property for this UCC\n"); + return ret; + } + priv->tdm_port = *prop; + uh_info->uf_info.tdm_num = priv->tdm_port; + + prop = of_get_property(np, "fsl,tdm-mode", NULL); + if (!prop) { + ret = -EINVAL; + dev_err(priv->dev, "No tdm-mode property for UCC\n"); + return ret; + } + priv->tdm_mode = set_tdm_mode((const char *)prop); + + prop = of_get_property(np, "fsl,tdm-framer-type", NULL); + if (!prop) { + ret = -EINVAL; + dev_err(priv->dev, "No tdm-framer-type property for UCC\n"); + return ret; + } + priv->tdm_framer_type = set_tdm_framer((const char *)prop); + + prop = of_get_property(np, "fsl,siram-entry-id", NULL); + if (!prop) { + ret = -EINVAL; + dev_err(priv->dev, "No siram entry id for UCC\n"); + return ret; + } + priv->siram_entry_id = *(const u32 *)prop; + + set_si_param(priv); + + return ret; +} + +static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding, + unsigned short parity) +{ + struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv; + + if (encoding != ENCODING_NRZ && + encoding != ENCODING_NRZI) + return -EINVAL; + + if (parity != PARITY_NONE && + parity != PARITY_CRC32_PR1_CCITT && + parity != 
PARITY_CRC16_PR1_CCITT) + return -EINVAL; + + priv->encoding = encoding; + priv->parity = parity; + + return 0; +} + +#ifdef CONFIG_PM +static void store_clk_config(struct ucc_hdlc_private *priv) +{ + struct qe_mux *qe_mux_reg = &qe_immr->qmx; + + /* store si clk */ + priv->cmxsi1cr_h = in_be32(&qe_mux_reg->cmxsi1cr_h); + priv->cmxsi1cr_l = in_be32(&qe_mux_reg->cmxsi1cr_l); + + /* store si sync */ + priv->cmxsi1syr = in_be32(&qe_mux_reg->cmxsi1syr); + + /* store ucc clk */ + memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32)); +} + +static void resume_clk_config(struct ucc_hdlc_private *priv) +{ + struct qe_mux *qe_mux_reg = &qe_immr->qmx; + + memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32)); + + out_be32(&qe_mux_reg->cmxsi1cr_h, priv->cmxsi1cr_h); + out_be32(&qe_mux_reg->cmxsi1cr_l, priv->cmxsi1cr_l); + + out_be32(&qe_mux_reg->cmxsi1syr, priv->cmxsi1syr); + +} + +static int uhdlc_suspend(struct device *dev) +{ + struct ucc_hdlc_private *priv = dev_get_drvdata(dev); + struct ucc_hdlc_info *uh_info; + struct ucc_fast __iomem *uf_regs; + + if (!priv) + return -EINVAL; + + uh_info = priv->uh_info; + uf_regs = priv->uf_regs; + + /* backup gumr guemr*/ + priv->gumr = in_be32(&uf_regs->gumr); + priv->guemr = in_8(&uf_regs->guemr); + + priv->ucc_pram_bak = kmalloc(sizeof(struct ucc_hdlc_param), + GFP_KERNEL); + if (!priv->ucc_pram_bak) + return -ENOMEM; + + /* backup HDLC parameter */ + memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram, + sizeof(struct ucc_hdlc_param)); + + /* store the clk configuration */ + store_clk_config(priv); + + /* save power */ + ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); + + dev_dbg(dev, "ucc hdlc suspend\n"); + return 0; +} + +static int uhdlc_resume(struct device *dev) +{ + struct ucc_hdlc_private *priv = dev_get_drvdata(dev); + struct ucc_hdlc_info *uh_info; + struct ucc_fast __iomem *uf_regs; + struct ucc_fast_private *uccf; + struct ucc_fast_info *uf_info; + int ret, i; + u32 cecr_subblock, bd_status; 
+ + if (!priv) + return -EINVAL; + + uh_info = priv->uh_info; + uf_info = &uh_info->uf_info; + uf_regs = priv->uf_regs; + uccf = priv->uccf; + + /* restore gumr guemr */ + out_8(&uf_regs->guemr, priv->guemr); + out_be32(&uf_regs->gumr, priv->gumr); + + /* Set Virtual Fifo registers */ + out_be16(&uf_regs->urfs, uf_info->urfs); + out_be16(&uf_regs->urfet, uf_info->urfet); + out_be16(&uf_regs->urfset, uf_info->urfset); + out_be16(&uf_regs->utfs, uf_info->utfs); + out_be16(&uf_regs->utfet, uf_info->utfet); + out_be16(&uf_regs->utftt, uf_info->utftt); + /* utfb, urfb are offsets from MURAM base */ + out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset); + out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset); + + /* Rx Tx and sync clock routing */ + resume_clk_config(priv); + + out_be32(&uf_regs->uccm, uf_info->uccm_mask); + out_be32(&uf_regs->ucce, 0xffffffff); + + ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); + + /* rebuild SIRAM */ + if (priv->tsa) + init_si(priv); + + /* Write to QE CECR, UCCx channel to Stop Transmission */ + cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); + ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock, + (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); + + /* Set UPSMR normal mode */ + out_be32(&uf_regs->upsmr, 0); + + /* init parameter base */ + cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); + ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock, + QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset); + + priv->ucc_pram = (struct ucc_hdlc_param __iomem *) + qe_muram_addr(priv->ucc_pram_offset); + + /* restore ucc parameter */ + memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak, + sizeof(struct ucc_hdlc_param)); + kfree(priv->ucc_pram_bak); + + /* rebuild BD entry */ + for (i = 0; i < RX_BD_RING_LEN; i++) { + if (i < (RX_BD_RING_LEN - 1)) + bd_status = R_E | R_I; + else + bd_status = R_E | R_I | R_W; + + out_be32((u32 *)(priv->rx_bd_base + i), bd_status); + 
out_be32(&priv->rx_bd_base[i].buf, priv->dma_rx_addr + + i * MAX_RX_BUF_LENGTH); + } + + for (i = 0; i < TX_BD_RING_LEN; i++) { + if (i < (TX_BD_RING_LEN - 1)) + bd_status = T_I | T_TC; + else + bd_status = T_I | T_TC | T_W; + + out_be32((u32 *)(priv->tx_bd_base + i), bd_status); + } + + /* if hdlc is busy enable TX and RX */ + if (priv->hdlc_busy == 1) { + cecr_subblock = ucc_fast_get_qe_cr_subblock( + priv->uh_info->uf_info.ucc_num); + + qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock, + (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); + + ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); + + /* Enable the TDM port */ + if (priv->tsa) + priv->si_regs->siglmr1_h |= (0x1 << priv->tdm_port); + } + + return 0; +} + +static const struct dev_pm_ops uhdlc_pm_ops = { + .suspend = uhdlc_suspend, + .resume = uhdlc_resume, + .freeze = uhdlc_suspend, + .thaw = uhdlc_resume, +}; + +#define HDLC_PM_OPS (&uhdlc_pm_ops) + +#else + +#define HDLC_PM_OPS NULL + +#endif +static const struct net_device_ops uhdlc_ops = { + .ndo_open = uhdlc_open, + .ndo_stop = uhdlc_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = uhdlc_ioctl, +}; + +static int ucc_hdlc_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct ucc_hdlc_private *uhdlc_priv = NULL; + struct ucc_hdlc_info *uh_info; + struct resource res; + struct device_node *np2; + struct net_device *dev; + hdlc_device *hdlc; + int ucc_num; + const unsigned int *prop; + const char *sprop; + int ret; + + prop = of_get_property(np, "cell-index", NULL); + if (!prop) { + dev_err(&pdev->dev, "Invalid ucc property\n"); + return -ENODEV; + } + + ucc_num = *prop - 1; + if ((ucc_num > 3) || (ucc_num < 0)) { + dev_err(&pdev->dev, ": Invalid UCC num\n"); + return -EINVAL; + } + + memcpy(&(uhdlc_info[ucc_num]), &uhdlc_primary_info, + sizeof(uhdlc_primary_info)); + + uh_info = &uhdlc_info[ucc_num]; + uh_info->uf_info.ucc_num = ucc_num; + + sprop = of_get_property(np, 
"rx-clock-name", NULL); + if (sprop) { + uh_info->uf_info.rx_clock = qe_clock_source(sprop); + if ((uh_info->uf_info.rx_clock < QE_CLK_NONE) || + (uh_info->uf_info.rx_clock > QE_CLK24)) { + dev_err(&pdev->dev, "Invalid rx-clock-name property\n"); + return -EINVAL; + } + } else { + dev_err(&pdev->dev, "Invalid rx-clock-name property\n"); + return -EINVAL; + } + + sprop = of_get_property(np, "tx-clock-name", NULL); + if (sprop) { + uh_info->uf_info.tx_clock = qe_clock_source(sprop); + if ((uh_info->uf_info.tx_clock < QE_CLK_NONE) || + (uh_info->uf_info.tx_clock > QE_CLK24)) { + dev_err(&pdev->dev, "Invalid tx-clock-name property\n"); + return -EINVAL; + } + } else { + dev_err(&pdev->dev, "Invalid tx-clock-name property\n"); + return -EINVAL; + } + + /* use the same clock when work in loopback */ + if (uh_info->uf_info.rx_clock == uh_info->uf_info.tx_clock) + qe_setbrg(uh_info->uf_info.rx_clock, 20000000, 1); + + ret = of_address_to_resource(np, 0, &res); + if (ret) + return -EINVAL; + + uh_info->uf_info.regs = res.start; + uh_info->uf_info.irq = irq_of_parse_and_map(np, 0); + + uhdlc_priv = kzalloc(sizeof(struct ucc_hdlc_private), GFP_KERNEL); + if (!uhdlc_priv) { + ret = -ENOMEM; + dev_err(&pdev->dev, "No mem to alloc hdlc private data\n"); + goto err_alloc_priv; + } + + dev_set_drvdata(&pdev->dev, uhdlc_priv); + uhdlc_priv->dev = &pdev->dev; + uhdlc_priv->uh_info = uh_info; + + if (of_get_property(np, "fsl,tdm-interface", NULL)) + uhdlc_priv->tsa = 1; + + if (of_get_property(np, "fsl,inter-loopback", NULL)) + uhdlc_priv->loopback = 1; + + if (uhdlc_priv->tsa == 1) { + ret = of_parse_hdlc_tdm(np, uhdlc_priv, uh_info); + if (ret) + goto err_miss_tsa_property; + np2 = of_find_node_by_name(NULL, "si"); + if (!np2) { + dev_err(uhdlc_priv->dev, "No si property\n"); + ret = -EINVAL; + goto err_miss_tsa_property; + } + of_address_to_resource(np2, 0, &res); + uhdlc_priv->si_regs = ioremap(res.start, + res.end - res.start + 1); + of_node_put(np2); + + np2 = 
of_find_node_by_name(NULL, "siram"); + if (!np2) { + ret = -EINVAL; + dev_err(uhdlc_priv->dev, "No siramproperty\n"); + goto err_miss_siram_property; + } + of_address_to_resource(np2, 0 , &res); + uhdlc_priv->siram = ioremap(res.start, res.end - res.start + 1); + of_node_put(np2); + + if (siram_init_flag == 0) { + memset(uhdlc_priv->siram, 0, res.end - res.start + 1); + siram_init_flag = 1; + } + } + + + ret = uhdlc_init(uhdlc_priv); + if (ret) { + dev_err(&pdev->dev, "Failed to init uhdlc\n"); + goto err_hdlc_init; + } + + dev = alloc_hdlcdev(uhdlc_priv); + if (!dev) { + ret = -ENOMEM; + pr_err("ucc_hdlc: unable to allocate memory\n"); + goto err_hdlc_init; + } + + uhdlc_priv->ndev = dev; + hdlc = dev_to_hdlc(dev); + dev->tx_queue_len = 16; + dev->netdev_ops = &uhdlc_ops; + hdlc->attach = ucc_hdlc_attach; + hdlc->xmit = ucc_hdlc_tx; + if (register_hdlc_device(dev)) { + ret = -ENOBUFS; + pr_err("ucc_hdlc: unable to register hdlc device\n"); + free_netdev(dev); + goto err_hdlc_init; + } + +#ifdef DEBUG + dump_priv(uhdlc_priv); + dump_ucc(uhdlc_priv); + dump_bds(uhdlc_priv); + if (uhdlc_priv->tsa) + mem_disp((u8 *)uhdlc_priv->si_regs, 0x20); +#endif + + return 0; + +err_hdlc_init: + if (uhdlc_priv->tsa) { + iounmap(uhdlc_priv->siram); + iounmap(uhdlc_priv->si_regs); + } +err_miss_siram_property: + if (uhdlc_priv->tsa) + iounmap(uhdlc_priv->si_regs); +err_miss_tsa_property: + kfree(uhdlc_priv); +err_alloc_priv: + return ret; + +} + +static int ucc_hdlc_remove(struct platform_device *pdev) +{ + struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev); + + uhdlc_memclean(priv); + + if (priv->si_regs) { + iounmap(priv->si_regs); + priv->si_regs = NULL; + } + + if (priv->siram) { + iounmap(priv->siram); + priv->siram = NULL; + } + kfree(priv); + + dev_info(&pdev->dev, "UCC based hdlc module removed\n"); + + return 0; +} + +static const struct of_device_id fsl_ucc_hdlc_of_match[] = { + { + .compatible = "fsl,ucc_hdlc", + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, 
fsl_ucc_hdlc_of_match); + +static struct platform_driver ucc_hdlc_driver = { + .probe = ucc_hdlc_probe, + .remove = ucc_hdlc_remove, + .driver = { + .owner = THIS_MODULE, + .name = DRV_NAME, + .pm = HDLC_PM_OPS, + .of_match_table = fsl_ucc_hdlc_of_match, + }, +}; + +static int __init ucc_hdlc_init(void) +{ + return platform_driver_register(&ucc_hdlc_driver); +} + +static void __exit ucc_hdlc_exit(void) +{ + platform_driver_unregister(&ucc_hdlc_driver); +} + +module_init(ucc_hdlc_init); +module_exit(ucc_hdlc_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Freescale Semiconductor Inc."); +MODULE_DESCRIPTION("Driver For Freescale QE UCC HDLC controller"); +MODULE_VERSION("1.0"); diff --git a/drivers/net/wan/fsl_ucc_hdlc.h b/drivers/net/wan/fsl_ucc_hdlc.h new file mode 100644 index 0000000..e0c8a4a --- /dev/null +++ b/drivers/net/wan/fsl_ucc_hdlc.h @@ -0,0 +1,191 @@ +/* Freescale QUICC Engine HDLC Device Driver + * + * Copyright 2014 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef CONFIG_UCC_HDLC_H +#define CONFIG_UCC_HDLC_H + +#include <linux/kernel.h> +#include <linux/list.h> + +#include <asm/immap_qe.h> +#include <asm/qe.h> + +#include <asm/ucc.h> +#include <asm/ucc_fast.h> + +/* SI RAM entries */ +#define SIR_LAST 0x0001 +#define SIR_BYTE 0x0002 +#define SIR_CNT(x) ((x) << 2) +#define SIR_CSEL(x) ((x) << 5) +#define SIR_SGS 0x0200 +#define SIR_SWTR 0x4000 +#define SIR_MCC 0x8000 +#define SIR_IDLE 0 + +/* SIxMR fields */ +#define SIMR_SAD(x) ((x) << 12) +#define SIMR_SDM_NORMAL 0x0000 +#define SIMR_SDM_INTERNAL_LOOPBACK 0x0800 +#define SIMR_SDM_MASK 0x0c00 +#define SIMR_CRT 0x0040 +#define SIMR_SL 0x0020 +#define SIMR_CE 0x0010 +#define SIMR_FE 0x0008 +#define SIMR_GM 0x0004 +#define SIMR_TFSD(n) (n) +#define SIMR_RFSD(n) ((n) << 8) + +enum tdm_ts_t { + TDM_TX_TS, + TDM_RX_TS +}; + + +enum tdm_framer_t { + TDM_FRAMER_T1, + TDM_FRAMER_E1 +}; + +enum tdm_mode_t { + TDM_INTERNAL_LOOPBACK, + TDM_NORMAL +}; + +struct ucc_hdlc_param { + __be16 riptr; + __be16 tiptr; + __be16 res0; + __be16 mrblr; + __be32 rstate; + __be32 rbase; + __be16 rbdstat; + __be16 rbdlen; + __be32 rdptr; + __be32 tstate; + __be32 tbase; + __be16 tbdstat; + __be16 tbdlen; + __be32 tdptr; + __be32 rbptr; + __be32 tbptr; + __be32 rcrc; + __be32 res1; + __be32 tcrc; + __be32 res2; + __be32 res3; + __be32 c_mask; + __be32 c_pres; + __be16 disfc; + __be16 crcec; + __be16 abtsc; + __be16 nmarc; + __be32 max_cnt; + __be16 mflr; + __be16 rfthr; + __be16 rfcnt; + __be16 hmask; + __be16 haddr1; + __be16 haddr2; + __be16 haddr3; + __be16 haddr4; + __be16 ts_tmp; + __be16 tmp_mb; +} __attribute__ ((__packed__)); + +struct si_mode_info { + u8 simr_rfsd; + u8 simr_tfsd; + u8 simr_crt; + u8 simr_sl; + u8 simr_ce; + u8 simr_fe; + u8 simr_gm; +}; + +struct ucc_hdlc_info { + struct ucc_fast_info uf_info; + struct si_mode_info si_info; +}; + +struct ucc_hdlc_private { + struct ucc_hdlc_info *uh_info; + struct ucc_fast_private *uccf; + struct device *dev; + struct net_device 
*ndev; + struct ucc_fast __iomem *uf_regs; /* UCC Fast registers */ + struct si1 __iomem *si_regs; + struct ucc_hdlc_param __iomem *ucc_pram; + u16 tsa; + u16 tdm_port; /* port for this tdm:TDMA,TDMB */ + u32 siram_entry_id; + u16 __iomem *siram; + enum tdm_mode_t tdm_mode; + enum tdm_framer_t tdm_framer_type; + bool hdlc_busy; + u8 loopback; + u8 num_of_ts; /* the number of timeslots in this tdm frame */ + u32 tx_ts_mask; /* tx time slot mask */ + u32 rx_ts_mask; /* rx time slot mask */ + u8 *rx_buffer; /* buffer used for Rx by the HDLC */ + u8 *tx_buffer; /* buffer used for Tx by the HDLC */ + dma_addr_t dma_rx_addr; /* dma mapped buffer for HDLC Rx */ + dma_addr_t dma_tx_addr; /* dma mapped buffer for HDLC Tx */ + struct qe_bd *tx_bd_base; + struct qe_bd *rx_bd_base; + struct qe_bd *curtx_bd; + struct qe_bd *currx_bd; + struct qe_bd *dirty_tx; + struct sk_buff **tx_skbuff; + struct sk_buff **rx_skbuff; + u16 skb_currx; + u16 skb_curtx; + u16 currx_bdnum; + unsigned short skb_dirtytx; + unsigned short tx_ring_size; + unsigned short rx_ring_size; + u32 ucc_pram_offset; + dma_addr_t dma_rx_bd; + dma_addr_t dma_tx_bd; + + unsigned short encoding; + unsigned short parity; + u32 clocking; +#ifdef CONFIG_PM + struct ucc_hdlc_param *ucc_pram_bak; + u32 gumr; + u8 guemr; + u32 cmxsi1cr_l, cmxsi1cr_h; + u32 cmxsi1syr; + u32 cmxucr[4]; +#endif +}; + +#define TX_BD_RING_LEN 0x10 +#define RX_BD_RING_LEN 0x20 +#define RX_CLEAN_MAX 0x10 +#define NUM_OF_BUF 4 +#define MAX_RX_BUF_LENGTH (48*0x20) +#define ALIGNMENT_OF_UCC_HDLC_PRAM 64 +#define SI_BANK_SIZE 128 +#define MAX_HDLC_NUM 4 +#define BD_LEN_MASK 0xffff +#define HDLC_HEAD_LEN 3 +#define HDLC_CRC_SIZE 2 +#define TX_RING_MOD_MASK(size) (size-1) +#define RX_RING_MOD_MASK(size) (size-1) + +#define HDLC_HEAD_MASK 0x000000ff +#define DEFAULT_HDLC_HEAD 0x68aa4400 +#define DEFAULT_ADDR_MASK 0xffff +#define DEFAULT_HDLC_ADDR 0xaa68 +#define DEFAULT_BROAD_ADDR 0xffff + +#endif |