author     Zhao Qiang <B45475@freescale.com>              2014-10-10 02:38:48 (GMT)
committer  Matthew Weigel <Matthew.Weigel@freescale.com>  2014-12-11 18:37:47 (GMT)
commit     36396c5901f1067d25d9ada16acbd36edb716759 (patch)
tree       98b102daa7f09ff6aef61422780f612dec4727c6 /drivers/soc
parent     7ba5c48b5faf6fbd16e61263dfd4a380e3902569 (diff)
download   linux-fsl-qoriq-36396c5901f1067d25d9ada16acbd36edb716759.tar.xz
qe-uart: modify qe-uart to adapt to both powerpc and arm
The QE is now supported on the ARM board LS1021, so qe-uart also needs to be
supported on LS1021. Modify the code so that qe-uart works on both powerpc
and LS1021.

Signed-off-by: Zhao Qiang <B45475@freescale.com>
---
upstream link: http://patchwork.ozlabs.org/patch/398471/
It is under discussion.

Change-Id: I07a9a091882cd572330b38e7a6e0632aea9a9042
Reviewed-on: http://git.am.freescale.net:8181/21119
Tested-by: Review Code-CDREVIEW <CDREVIEW@freescale.com>
Reviewed-by: Xiaobo Xie <X.Xie@freescale.com>
Reviewed-by: Zhengxiong Jin <Jason.Jin@freescale.com>
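Most of the diff below is a mechanical conversion: the PowerPC-only MMIO accessors in_be32()/out_be32()/out_be16() are replaced with the generic big-endian helpers ioread32be()/iowrite32be()/iowrite16be() from <linux/io.h>, which exist on both powerpc and arm. A minimal sketch of the pattern (illustrative function and register names, not code from the patch):

    #include <linux/io.h>

    /* Illustrative only: set a flag bit in a big-endian MMIO register. */
    static void example_set_flag(u32 __iomem *reg, u32 flag)
    {
            u32 tmp;

            tmp = ioread32be(reg);          /* was: tmp = in_be32(reg);        */
            iowrite32be(tmp | flag, reg);   /* was: out_be32(reg, tmp | flag); */
    }

Note the reversed argument order: out_be32(addr, val) becomes iowrite32be(val, addr).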
Diffstat (limited to 'drivers/soc')
-rw-r--r--  drivers/soc/qe/Kconfig     |  1
-rw-r--r--  drivers/soc/qe/qe.c        | 63
-rw-r--r--  drivers/soc/qe/qe_common.c |  2
-rw-r--r--  drivers/soc/qe/qe_ic.c     |  7
-rw-r--r--  drivers/soc/qe/qe_io.c     | 53
-rw-r--r--  drivers/soc/qe/ucc_slow.c  | 40
6 files changed, 81 insertions(+), 85 deletions(-)
diff --git a/drivers/soc/qe/Kconfig b/drivers/soc/qe/Kconfig
index 49118e1..43b984b 100644
--- a/drivers/soc/qe/Kconfig
+++ b/drivers/soc/qe/Kconfig
@@ -4,7 +4,6 @@
config QUICC_ENGINE
bool "Freescale QUICC Engine (QE) Support"
- depends on FSL_SOC && (PPC32 || PPC64)
select LIB_RHEAP
select CRC32
---help---
diff --git a/drivers/soc/qe/qe.c b/drivers/soc/qe/qe.c
index e0926f5..2aaa5b2 100644
--- a/drivers/soc/qe/qe.c
+++ b/drivers/soc/qe/qe.c
@@ -74,8 +74,8 @@ static phys_addr_t qebase = -1;
phys_addr_t get_qe_base(void)
{
struct device_node *qe;
- int size;
- const u32 *prop;
+ int ret;
+ struct resource res;
if (qebase != -1)
return qebase;
@@ -87,9 +87,9 @@ phys_addr_t get_qe_base(void)
return qebase;
}
- prop = of_get_property(qe, "reg", &size);
- if (prop && size >= sizeof(*prop))
- qebase = of_translate_address(qe, prop);
+ ret = of_address_to_resource(qe, 0, &res);
+ if (!ret)
+ qebase = res.start;
of_node_put(qe);
return qebase;
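The hunk above replaces the open-coded of_get_property("reg") + of_translate_address() sequence with of_address_to_resource(), which performs the same lookup and translation on any architecture and returns 0 on success. A sketch of the resulting pattern (illustrative helper name, not code from the patch):

    #include <linux/of.h>
    #include <linux/of_address.h>

    /* Illustrative only: translated base address of a node, or
     * (phys_addr_t)-1 if its "reg" property cannot be resolved. */
    static phys_addr_t example_node_base(struct device_node *np)
    {
            struct resource res;

            if (of_address_to_resource(np, 0, &res))
                    return (phys_addr_t)-1;

            return res.start;
    }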
@@ -121,7 +121,7 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
spin_lock_irqsave(&qe_lock, flags);
if (cmd == QE_RESET) {
- out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
+ iowrite32be((u32) (cmd | QE_CR_FLG), &qe_immr->cp.cecr);
} else {
if (cmd == QE_ASSIGN_PAGE) {
/* Here device is the SNUM, not sub-block */
@@ -138,15 +138,14 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
}
- out_be32(&qe_immr->cp.cecdr, cmd_input);
- out_be32(&qe_immr->cp.cecr,
- (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
- mcn_protocol << mcn_shift));
+ iowrite32be(cmd_input, &qe_immr->cp.cecdr);
+ iowrite32be((cmd | QE_CR_FLG | ((u32) device << dev_shift) |
+ (u32)mcn_protocol << mcn_shift), &qe_immr->cp.cecr);
}
/* wait for the QE_CR_FLG to clear */
- ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
- 100, 0);
+ ret = spin_event_timeout((ioread32be(&qe_immr->cp.cecr)
+ & QE_CR_FLG) == 0, 100, 0);
/* On timeout (e.g. failure), the expression will be false (ret == 0),
otherwise it will be true (ret == 1). */
spin_unlock_irqrestore(&qe_lock, flags);
@@ -170,8 +169,8 @@ static unsigned int brg_clk;
unsigned int qe_get_brg_clk(void)
{
struct device_node *qe;
- int size;
- const u32 *prop;
+ u32 val;
+ int ret;
if (brg_clk)
return brg_clk;
@@ -183,9 +182,9 @@ unsigned int qe_get_brg_clk(void)
return brg_clk;
}
- prop = of_get_property(qe, "brg-frequency", &size);
- if (prop && size == sizeof(*prop))
- brg_clk = *prop;
+ ret = of_property_read_u32_index(qe, "brg-frequency", 0, &val);
+ if (!ret)
+ brg_clk = val;
of_node_put(qe);
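Likewise, reads of single u32 properties move from of_get_property() plus a manual size check to of_property_read_u32_index(), which validates the property and copies the value in one call, returning 0 on success. A sketch with an illustrative helper name:

    #include <linux/of.h>

    /* Illustrative only: read "brg-frequency" from a node; 0 on failure. */
    static u32 example_brg_frequency(struct device_node *np)
    {
            u32 val;

            if (of_property_read_u32_index(np, "brg-frequency", 0, &val))
                    return 0;

            return val;
    }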
@@ -225,7 +224,7 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
QE_BRGC_ENABLE | div16;
- out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
+ iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
return 0;
}
@@ -365,9 +364,9 @@ static int qe_sdma_init(void)
return -ENOMEM;
}
- out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
- out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
- (0x1 << QE_SDMR_CEN_SHIFT)));
+ iowrite32be((u32) sdma_buf_offset & QE_SDEBCR_BA_MASK, &sdma->sdebcr);
+ iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
+ &sdma->sdmr);
return 0;
}
@@ -403,14 +402,14 @@ static void qe_upload_microcode(const void *base,
pr_info("qe-FM: uploading microcode '%s'\n", ucode->id);
/* Use auto-increment */
- out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
- QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
+ iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE |
+ QE_IRAM_IADD_BADDR, &qe_immr->iram.iadd);
for (i = 0; i < be32_to_cpu(ucode->count); i++)
- out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
+ iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
/* Set I-RAM Ready Register */
- out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
+ iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready);
}
/*
@@ -528,11 +527,11 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
u32 trap = be32_to_cpu(ucode->traps[j]);
if (trap)
- out_be32(&qe_immr->rsp[i].tibcr[j], trap);
+ iowrite32be(trap, &qe_immr->rsp[i].tibcr[j]);
}
/* Enable traps */
- out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
+ iowrite32be(be32_to_cpu(ucode->eccr), &qe_immr->rsp[i].eccr);
}
qe_firmware_uploaded = 1;
@@ -651,9 +650,9 @@ EXPORT_SYMBOL(qe_get_num_of_risc);
unsigned int qe_get_num_of_snums(void)
{
struct device_node *qe;
- int size;
unsigned int num_of_snums;
- const u32 *prop;
+ u32 val;
+ int ret;
num_of_snums = 28; /* The default number of snum for threads is 28 */
qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
@@ -667,9 +666,9 @@ unsigned int qe_get_num_of_snums(void)
return num_of_snums;
}
- prop = of_get_property(qe, "fsl,qe-num-snums", &size);
- if (prop && size == sizeof(*prop)) {
- num_of_snums = *prop;
+ ret = of_property_read_u32_index(qe, "fsl,qe-num-snums", 0, &val);
+ if (!ret) {
+ num_of_snums = val;
if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
/* No QE ever has fewer than 28 SNUMs */
pr_err("QE: number of snum is invalid\n");
diff --git a/drivers/soc/qe/qe_common.c b/drivers/soc/qe/qe_common.c
index 8e93b88..6bc9b18 100644
--- a/drivers/soc/qe/qe_common.c
+++ b/drivers/soc/qe/qe_common.c
@@ -68,7 +68,7 @@ int qe_muram_init(void)
}
}
- muram_pbase = of_translate_address(np, zero);
+ muram_pbase = (phys_addr_t)of_translate_address(np, zero);
if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
pr_err("Cannot translate zero through CPM muram node");
ret = -ENODEV;
diff --git a/drivers/soc/qe/qe_ic.c b/drivers/soc/qe/qe_ic.c
index 1968f22..cc1b8d5 100644
--- a/drivers/soc/qe/qe_ic.c
+++ b/drivers/soc/qe/qe_ic.c
@@ -16,7 +16,10 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/irqdomain.h>
#include <linux/errno.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
@@ -177,13 +180,13 @@ static struct qe_ic_info qe_ic_info[] = {
static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg)
{
- return in_be32(base + (reg >> 2));
+ return ioread32be(base + (reg >> 2));
}
static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
u32 value)
{
- out_be32(base + (reg >> 2), value);
+ iowrite32be(value, base + (reg >> 2));
}
static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
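The includes added above (<linux/irqdomain.h>, <linux/of_address.h>, <linux/of_irq.h>) spell out dependencies that the powerpc build happened to satisfy through indirectly included headers; the arm build does not pull them in implicitly. For reference, the standard OF interrupt helper this code depends on is used like this (illustrative usage, not code from the patch):

    #include <linux/of_irq.h>

    /* Illustrative only: map a node's first "interrupts" entry to a
     * Linux virtual IRQ number (returns 0 on failure). */
    static unsigned int example_first_virq(struct device_node *np)
    {
            return irq_of_parse_and_map(np, 0);
    }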
diff --git a/drivers/soc/qe/qe_io.c b/drivers/soc/qe/qe_io.c
index 939e903..7f40d3c 100644
--- a/drivers/soc/qe/qe_io.c
+++ b/drivers/soc/qe/qe_io.c
@@ -24,7 +24,6 @@
#include <linux/io.h>
#include <linux/fsl/qe.h>
#include <asm/prom.h>
-#include <sysdev/fsl_soc.h>
#undef DEBUG
@@ -62,16 +61,16 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
/* Set open drain, if required */
- tmp_val = in_be32(&par_io->cpodr);
+ tmp_val = ioread32be(&par_io->cpodr);
if (open_drain)
- out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
+ iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
else
- out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
+ iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
/* define direction */
tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
- in_be32(&par_io->cpdir2) :
- in_be32(&par_io->cpdir1);
+ ioread32be(&par_io->cpdir2) :
+ ioread32be(&par_io->cpdir1);
/* get all bits mask for 2 bit per port */
pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
@@ -83,34 +82,30 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
/* clear and set 2 bits mask */
if (pin > (QE_PIO_PINS / 2) - 1) {
- out_be32(&par_io->cpdir2,
- ~pin_mask2bits & tmp_val);
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
tmp_val &= ~pin_mask2bits;
- out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
} else {
- out_be32(&par_io->cpdir1,
- ~pin_mask2bits & tmp_val);
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
tmp_val &= ~pin_mask2bits;
- out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
}
/* define pin assignment */
tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
- in_be32(&par_io->cppar2) :
- in_be32(&par_io->cppar1);
+ ioread32be(&par_io->cppar2) :
+ ioread32be(&par_io->cppar1);
new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
(pin % (QE_PIO_PINS / 2) + 1) * 2));
/* clear and set 2 bits mask */
if (pin > (QE_PIO_PINS / 2) - 1) {
- out_be32(&par_io->cppar2,
- ~pin_mask2bits & tmp_val);
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
tmp_val &= ~pin_mask2bits;
- out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
} else {
- out_be32(&par_io->cppar1,
- ~pin_mask2bits & tmp_val);
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
tmp_val &= ~pin_mask2bits;
- out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
}
}
EXPORT_SYMBOL(__par_io_config_pin);
@@ -138,12 +133,12 @@ int par_io_data_set(u8 port, u8 pin, u8 val)
/* calculate pin location */
pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
- tmp_val = in_be32(&par_io[port].cpdata);
+ tmp_val = ioread32be(&par_io[port].cpdata);
if (val == 0) /* clear */
- out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
+ iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
else /* set */
- out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
+ iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
return 0;
}
@@ -200,17 +195,17 @@ static void dump_par_io(void)
pr_info("%s: par_io=%p\n", __func__, par_io);
for (i = 0; i < num_par_io_ports; i++) {
pr_info(" cpodr[%u]=%08x\n", i,
- in_be32(&par_io[i].cpodr));
+ ioread32be(&par_io[i].cpodr));
pr_info(" cpdata[%u]=%08x\n", i,
- in_be32(&par_io[i].cpdata));
+ ioread32be(&par_io[i].cpdata));
pr_info(" cpdir1[%u]=%08x\n", i,
- in_be32(&par_io[i].cpdir1));
+ ioread32be(&par_io[i].cpdir1));
pr_info(" cpdir2[%u]=%08x\n", i,
- in_be32(&par_io[i].cpdir2));
+ ioread32be(&par_io[i].cpdir2));
pr_info(" cppar1[%u]=%08x\n", i,
- in_be32(&par_io[i].cppar1));
+ ioread32be(&par_io[i].cppar1));
pr_info(" cppar2[%u]=%08x\n", i,
- in_be32(&par_io[i].cppar2));
+ ioread32be(&par_io[i].cppar2));
}
}
EXPORT_SYMBOL(dump_par_io);
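The qe_io.c changes keep the existing read-modify-write logic for the parallel-I/O registers and only swap the accessors. The recurring clear-then-set of a two-bit field can be condensed into a sketch like this (register layout and names are illustrative):

    #include <linux/io.h>

    /* Illustrative only: replace a 2-bit field at bit offset 'shift'. */
    static void example_set_2bit_field(u32 __iomem *reg, u32 shift, u32 val)
    {
            u32 tmp = ioread32be(reg);

            tmp &= ~(0x3 << shift);        /* clear the old field value */
            tmp |= (val & 0x3) << shift;   /* install the new value     */
            iowrite32be(tmp, reg);
    }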
diff --git a/drivers/soc/qe/ucc_slow.c b/drivers/soc/qe/ucc_slow.c
index d023f19..edb1b2e 100644
--- a/drivers/soc/qe/ucc_slow.c
+++ b/drivers/soc/qe/ucc_slow.c
@@ -46,7 +46,7 @@ EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock);
void ucc_slow_poll_transmitter_now(struct ucc_slow_private *uccs)
{
- out_be16(&uccs->us_regs->utodr, UCC_SLOW_TOD);
+ iowrite16be(UCC_SLOW_TOD, &uccs->us_regs->utodr);
}
void ucc_slow_graceful_stop_tx(struct ucc_slow_private *uccs)
@@ -88,7 +88,7 @@ void ucc_slow_enable(struct ucc_slow_private *uccs, enum comm_dir mode)
us_regs = uccs->us_regs;
/* Enable reception and/or transmission on this UCC. */
- gumr_l = in_be32(&us_regs->gumr_l);
+ gumr_l = ioread32be(&us_regs->gumr_l);
if (mode & COMM_DIR_TX) {
gumr_l |= UCC_SLOW_GUMR_L_ENT;
uccs->enabled_tx = 1;
@@ -97,7 +97,7 @@ void ucc_slow_enable(struct ucc_slow_private *uccs, enum comm_dir mode)
gumr_l |= UCC_SLOW_GUMR_L_ENR;
uccs->enabled_rx = 1;
}
- out_be32(&us_regs->gumr_l, gumr_l);
+ iowrite32be(gumr_l, &us_regs->gumr_l);
}
EXPORT_SYMBOL(ucc_slow_enable);
@@ -109,7 +109,7 @@ void ucc_slow_disable(struct ucc_slow_private *uccs, enum comm_dir mode)
us_regs = uccs->us_regs;
/* Disable reception and/or transmission on this UCC. */
- gumr_l = in_be32(&us_regs->gumr_l);
+ gumr_l = ioread32be(&us_regs->gumr_l);
if (mode & COMM_DIR_TX) {
gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
uccs->enabled_tx = 0;
@@ -118,7 +118,7 @@ void ucc_slow_disable(struct ucc_slow_private *uccs, enum comm_dir mode)
gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
uccs->enabled_rx = 0;
}
- out_be32(&us_regs->gumr_l, gumr_l);
+ iowrite32be(gumr_l, &us_regs->gumr_l);
}
EXPORT_SYMBOL(ucc_slow_disable);
@@ -209,7 +209,7 @@ int ucc_slow_init(struct ucc_slow_info *us_info,
return ret;
}
- out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length);
+ iowrite16be(us_info->max_rx_buf_length, &uccs->us_pram->mrblr);
INIT_LIST_HEAD(&uccs->confQ);
@@ -239,27 +239,27 @@ int ucc_slow_init(struct ucc_slow_info *us_info,
bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
/* clear bd buffer */
- out_be32(&bd->buf, 0);
+ iowrite32be(0, &bd->buf);
/* set bd status and length */
- out_be32((u32 *) bd, 0);
+ iowrite32be(0, (u32 *) bd);
bd++;
}
/* for last BD set Wrap bit */
- out_be32(&bd->buf, 0);
- out_be32((u32 *) bd, cpu_to_be32(T_W));
+ iowrite32be(0, &bd->buf);
+ iowrite32be(T_W, (u32 *) bd);
/* Init Rx bds */
bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
/* set bd status and length */
- out_be32((u32 *)bd, 0);
+ iowrite32be(0, (u32 *)bd);
/* clear bd buffer */
- out_be32(&bd->buf, 0);
+ iowrite32be(0, &bd->buf);
bd++;
}
/* for last BD set Wrap bit */
- out_be32((u32 *)bd, cpu_to_be32(R_W));
- out_be32(&bd->buf, 0);
+ iowrite32be(R_W, (u32 *)bd);
+ iowrite32be(0, &bd->buf);
/* Set GUMR (For more details see the hardware spec.). */
/* gumr_h */
@@ -280,7 +280,7 @@ int ucc_slow_init(struct ucc_slow_info *us_info,
gumr |= UCC_SLOW_GUMR_H_TXSY;
if (us_info->rtsm)
gumr |= UCC_SLOW_GUMR_H_RTSM;
- out_be32(&us_regs->gumr_h, gumr);
+ iowrite32be(gumr, &us_regs->gumr_h);
/* gumr_l */
gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
@@ -293,7 +293,7 @@ int ucc_slow_init(struct ucc_slow_info *us_info,
gumr |= UCC_SLOW_GUMR_L_TINV;
if (us_info->tend)
gumr |= UCC_SLOW_GUMR_L_TEND;
- out_be32(&us_regs->gumr_l, gumr);
+ iowrite32be(gumr, &us_regs->gumr_l);
/* Function code registers */
@@ -303,8 +303,8 @@ int ucc_slow_init(struct ucc_slow_info *us_info,
uccs->us_pram->rbmr = UCC_BMR_BO_BE;
/* rbase, tbase are offsets from MURAM base */
- out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
- out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);
+ iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);
+ iowrite16be(uccs->tx_base_offset, &uccs->us_pram->tbase);
/* Mux clocking */
/* Grant Support */
@@ -334,14 +334,14 @@ int ucc_slow_init(struct ucc_slow_info *us_info,
}
/* Set interrupt mask register at UCC level. */
- out_be16(&us_regs->uccm, us_info->uccm_mask);
+ iowrite16be(us_info->uccm_mask, &us_regs->uccm);
/* First, clear anything pending at UCC level,
* otherwise, old garbage may come through
* as soon as the dam is opened. */
/* Writing '1' clears */
- out_be16(&us_regs->ucce, 0xffff);
+ iowrite16be(0xffff, &us_regs->ucce);
/* Issue QE Init command */
if (us_info->init_tx && us_info->init_rx)
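One subtlety in the ucc_slow.c buffer-descriptor hunks: the old code passed cpu_to_be32(T_W) and cpu_to_be32(R_W) to out_be32(). Both out_be32() and iowrite32be() already store their argument in big-endian order, so the explicit swap was a no-op on big-endian powerpc but would double-swap on a little-endian arm core such as the LS1021; it is therefore dropped together with the accessor change. Illustrative before/after for the last-BD Wrap bit:

    out_be32((u32 *)bd, cpu_to_be32(T_W));   /* old: powerpc-only, redundant swap */
    iowrite32be(T_W, (u32 *)bd);             /* new: portable, single swap        */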