author    Linus Torvalds <torvalds@linux-foundation.org>    2015-11-10 23:00:03 (GMT)
committer Linus Torvalds <torvalds@linux-foundation.org>    2015-11-10 23:00:03 (GMT)
commit    b44a3d2a85c64208a57362a1728efb58a6556cd6 (patch)
tree      293302b3ac918eb75b442fa035eb976850163b1d /drivers/soc
parent    56e0464980febfa50432a070261579415c72664e (diff)
parent    d13a5c8c4c3dbe299659bcff805f79a2c83e2bbc (diff)
download  linux-b44a3d2a85c64208a57362a1728efb58a6556cd6.tar.xz
Merge tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull ARM SoC driver updates from Olof Johansson:
 "As we've enabled multiplatform kernels on ARM, and greatly done away
  with the contents under arch/arm/mach-*, there's still need for
  SoC-related drivers to go somewhere.

  Many of them go in through other driver trees, but we still have
  drivers/soc to hold some of the "doesn't fit anywhere" lowlevel code
  that might be shared between ARM and ARM64 (or just in general makes
  sense to not have under the architecture directory).

  This branch contains mostly such code:

   - Drivers for qualcomm SoCs for SMEM, SMD and SMD-RPM, used to
     communicate with power management blocks on these SoCs for use by
     clock, regulator and bus frequency drivers.

   - Allwinner Reduced Serial Bus driver, again used to communicate with
     PMICs.

   - Drivers for ARM's SCPI (System Control Processor). Not to be
     confused with PSCI (Power State Coordination Interface). SCPI is
     used to communicate with the assistant embedded cores doing power
     management, and we have yet to see how many of them will implement
     this for their hardware vs abstracting in other ways (or not at all
     like in the past).

   - To make confusion between SCPI and PSCI more likely, this release
     also includes an update of PSCI to interface version 1.0.

   - Rockchip support for power domains.

   - A driver to talk to the firmware on Raspberry Pi"

* tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (57 commits)
  soc: qcom: smd-rpm: Correct size of outgoing message
  bus: sunxi-rsb: Add driver for Allwinner Reduced Serial Bus
  bus: sunxi-rsb: Add Allwinner Reduced Serial Bus (RSB) controller bindings
  ARM: bcm2835: add mutual inclusion protection
  drivers: psci: make PSCI 1.0 functions initialization version dependent
  dt-bindings: Correct paths in Rockchip power domains binding document
  soc: rockchip: power-domain: don't try to print the clock name in error case
  soc: qcom/smem: add HWSPINLOCK dependency
  clk: berlin: add cpuclk
  ARM: berlin: dts: add CLKID_CPU for BG2Q
  ARM: bcm2835: Add the Raspberry Pi firmware driver
  soc: qcom: smem: Move RPM message ram out of smem DT node
  soc: qcom: smd-rpm: Correct the active vs sleep state flagging
  soc: qcom: smd: delete unneeded of_node_put
  firmware: qcom-scm: build for correct architecture level
  soc: qcom: smd: Correct SMEM items for upper channels
  qcom-scm: add missing prototype for qcom_scm_is_available()
  qcom-scm: fix endianess issue in __qcom_scm_is_call_available
  soc: qcom: smd: Reject send of too big packets
  soc: qcom: smd: Handle big endian CPUs
  ...
Diffstat (limited to 'drivers/soc')
-rw-r--r--  drivers/soc/Kconfig                  1
-rw-r--r--  drivers/soc/Makefile                 1
-rw-r--r--  drivers/soc/qcom/Kconfig            17
-rw-r--r--  drivers/soc/qcom/smd-rpm.c          68
-rw-r--r--  drivers/soc/qcom/smd.c             296
-rw-r--r--  drivers/soc/qcom/smem.c            368
-rw-r--r--  drivers/soc/rockchip/Kconfig        18
-rw-r--r--  drivers/soc/rockchip/Makefile        4
-rw-r--r--  drivers/soc/rockchip/pm_domains.c  490
9 files changed, 938 insertions, 325 deletions
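
The SMD core touched below exposes a small client API: a driver supplies probe/remove and a receive callback, and transmits with qcom_smd_send(). As a rough usage sketch built on that API (not part of this pull; "my_smd" and the ping payload are made up):

	#include <linux/module.h>
	#include <linux/soc/qcom/smd.h>

	static int my_smd_probe(struct qcom_smd_device *sdev)
	{
		static const u8 ping[4] = { 0x01, 0x02, 0x03, 0x04 };

		/* word-aligned channels only accept 4-byte multiples */
		return qcom_smd_send(sdev->channel, ping, sizeof(ping));
	}

	static int my_smd_callback(struct qcom_smd_device *sdev,
				   const void *data, size_t count)
	{
		dev_dbg(&sdev->dev, "received %zu bytes\n", count);
		return 0;
	}

	static struct qcom_smd_driver my_smd_driver = {
		.probe = my_smd_probe,
		.callback = my_smd_callback,
		.driver = {
			.name = "my_smd",
		},
	};
	module_qcom_smd_driver(my_smd_driver);

	MODULE_LICENSE("GPL v2");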
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index c9c0fcc..4e853ed 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -3,6 +3,7 @@ menu "SOC (System On Chip) specific Drivers"
source "drivers/soc/brcmstb/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/qcom/Kconfig"
+source "drivers/soc/rockchip/Kconfig"
source "drivers/soc/sunxi/Kconfig"
source "drivers/soc/ti/Kconfig"
source "drivers/soc/versatile/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 4e27f10..f2ba2e9 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/
obj-$(CONFIG_MACH_DOVE) += dove/
obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
obj-$(CONFIG_ARCH_QCOM) += qcom/
+obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_SOC_TI) += ti/
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index ba47b70..eec7614 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -19,6 +19,15 @@ config QCOM_PM
modes. It interfaces with various system drivers to put the cores in
low power modes.
+config QCOM_SMEM
+ tristate "Qualcomm Shared Memory Manager (SMEM)"
+ depends on ARCH_QCOM
+ depends on HWSPINLOCK
+ help
+ Say y here to enable support for the Qualcomm Shared Memory Manager.
+ The driver provides an interface to items in a heap shared among all
+ processors in a Qualcomm platform.
+
config QCOM_SMD
tristate "Qualcomm Shared Memory Driver (SMD)"
depends on QCOM_SMEM
@@ -40,11 +49,3 @@ config QCOM_SMD_RPM
Say M here if you want to include support for the Qualcomm RPM as a
module. This will build a module called "qcom-smd-rpm".
-
-config QCOM_SMEM
- tristate "Qualcomm Shared Memory Manager (SMEM)"
- depends on ARCH_QCOM
- help
- Say y here to enable support for the Qualcomm Shared Memory Manager.
- The driver provides an interface to items in a heap shared among all
- processors in a Qualcomm platform.
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index 1392ccf..2969321 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -17,6 +17,7 @@
#include <linux/of_platform.h>
#include <linux/io.h>
#include <linux/interrupt.h>
+#include <linux/slab.h>
#include <linux/soc/qcom/smd.h>
#include <linux/soc/qcom/smd-rpm.h>
@@ -44,8 +45,8 @@ struct qcom_smd_rpm {
* @length: length of the payload
*/
struct qcom_rpm_header {
- u32 service_type;
- u32 length;
+ __le32 service_type;
+ __le32 length;
};
/**
@@ -57,11 +58,11 @@ struct qcom_rpm_header {
* @data_len: length of the payload following this header
*/
struct qcom_rpm_request {
- u32 msg_id;
- u32 flags;
- u32 type;
- u32 id;
- u32 data_len;
+ __le32 msg_id;
+ __le32 flags;
+ __le32 type;
+ __le32 id;
+ __le32 data_len;
};
/**
@@ -74,10 +75,10 @@ struct qcom_rpm_request {
* Multiple of these messages can be stacked in an rpm message.
*/
struct qcom_rpm_message {
- u32 msg_type;
- u32 length;
+ __le32 msg_type;
+ __le32 length;
union {
- u32 msg_id;
+ __le32 msg_id;
u8 message[0];
};
};
@@ -104,30 +105,34 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
static unsigned msg_id = 1;
int left;
int ret;
-
struct {
struct qcom_rpm_header hdr;
struct qcom_rpm_request req;
- u8 payload[count];
- } pkt;
+ u8 payload[];
+ } *pkt;
+ size_t size = sizeof(*pkt) + count;
/* SMD packets to the RPM may not exceed 256 bytes */
- if (WARN_ON(sizeof(pkt) >= 256))
+ if (WARN_ON(size >= 256))
return -EINVAL;
+ pkt = kmalloc(size, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
mutex_lock(&rpm->lock);
- pkt.hdr.service_type = RPM_SERVICE_TYPE_REQUEST;
- pkt.hdr.length = sizeof(struct qcom_rpm_request) + count;
+ pkt->hdr.service_type = cpu_to_le32(RPM_SERVICE_TYPE_REQUEST);
+ pkt->hdr.length = cpu_to_le32(sizeof(struct qcom_rpm_request) + count);
- pkt.req.msg_id = msg_id++;
- pkt.req.flags = BIT(state);
- pkt.req.type = type;
- pkt.req.id = id;
- pkt.req.data_len = count;
- memcpy(pkt.payload, buf, count);
+ pkt->req.msg_id = cpu_to_le32(msg_id++);
+ pkt->req.flags = cpu_to_le32(state);
+ pkt->req.type = cpu_to_le32(type);
+ pkt->req.id = cpu_to_le32(id);
+ pkt->req.data_len = cpu_to_le32(count);
+ memcpy(pkt->payload, buf, count);
- ret = qcom_smd_send(rpm->rpm_channel, &pkt, sizeof(pkt));
+ ret = qcom_smd_send(rpm->rpm_channel, pkt, size);
if (ret)
goto out;
@@ -138,6 +143,7 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
ret = rpm->ack_status;
out:
+ kfree(pkt);
mutex_unlock(&rpm->lock);
return ret;
}
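
With the packet now built on the heap instead of as a variable-length array on the stack, callers remain bound by the 256-byte limit checked above. A hedged usage sketch (res_type, res_id and the "KHz" key are placeholder values, not taken from this patch):

	struct {
		__le32 key;
		__le32 value;
	} req = {
		.key = cpu_to_le32(0x007a484b),		/* "KHz", hypothetical */
		.value = cpu_to_le32(19200),
	};
	int ret;

	/* 8-byte header + 20-byte request + 8-byte payload = 36 < 256 */
	ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
				 res_type, res_id, &req, sizeof(req));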
@@ -148,27 +154,29 @@ static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev,
size_t count)
{
const struct qcom_rpm_header *hdr = data;
+ size_t hdr_length = le32_to_cpu(hdr->length);
const struct qcom_rpm_message *msg;
struct qcom_smd_rpm *rpm = dev_get_drvdata(&qsdev->dev);
const u8 *buf = data + sizeof(struct qcom_rpm_header);
- const u8 *end = buf + hdr->length;
+ const u8 *end = buf + hdr_length;
char msgbuf[32];
int status = 0;
- u32 len;
+ u32 len, msg_length;
- if (hdr->service_type != RPM_SERVICE_TYPE_REQUEST ||
- hdr->length < sizeof(struct qcom_rpm_message)) {
+ if (le32_to_cpu(hdr->service_type) != RPM_SERVICE_TYPE_REQUEST ||
+ hdr_length < sizeof(struct qcom_rpm_message)) {
dev_err(&qsdev->dev, "invalid request\n");
return 0;
}
while (buf < end) {
msg = (struct qcom_rpm_message *)buf;
- switch (msg->msg_type) {
+ msg_length = le32_to_cpu(msg->length);
+ switch (le32_to_cpu(msg->msg_type)) {
case RPM_MSG_TYPE_MSG_ID:
break;
case RPM_MSG_TYPE_ERR:
- len = min_t(u32, ALIGN(msg->length, 4), sizeof(msgbuf));
+ len = min_t(u32, ALIGN(msg_length, 4), sizeof(msgbuf));
memcpy_fromio(msgbuf, msg->message, len);
msgbuf[len - 1] = 0;
@@ -179,7 +187,7 @@ static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev,
break;
}
- buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg->length, 4);
+ buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg_length, 4);
}
rpm->ack_status = status;
diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c
index a6155c9..86b598c 100644
--- a/drivers/soc/qcom/smd.c
+++ b/drivers/soc/qcom/smd.c
@@ -65,7 +65,9 @@
*/
struct smd_channel_info;
+struct smd_channel_info_pair;
struct smd_channel_info_word;
+struct smd_channel_info_word_pair;
#define SMD_ALLOC_TBL_COUNT 2
#define SMD_ALLOC_TBL_SIZE 64
@@ -85,8 +87,8 @@ static const struct {
.fifo_base_id = 338
},
{
- .alloc_tbl_id = 14,
- .info_base_id = 266,
+ .alloc_tbl_id = 266,
+ .info_base_id = 138,
.fifo_base_id = 202,
},
};
@@ -151,10 +153,8 @@ enum smd_channel_state {
* @name: name of the channel
* @state: local state of the channel
* @remote_state: remote state of the channel
- * @tx_info: byte aligned outgoing channel info
- * @rx_info: byte aligned incoming channel info
- * @tx_info_word: word aligned outgoing channel info
- * @rx_info_word: word aligned incoming channel info
+ * @info: byte aligned outgoing/incoming channel info
+ * @info_word: word aligned outgoing/incoming channel info
* @tx_lock: lock to make writes to the channel mutually exclusive
* @fblockread_event: wakeup event tied to tx fBLOCKREADINTR
* @tx_fifo: pointer to the outgoing ring buffer
@@ -175,11 +175,8 @@ struct qcom_smd_channel {
enum smd_channel_state state;
enum smd_channel_state remote_state;
- struct smd_channel_info *tx_info;
- struct smd_channel_info *rx_info;
-
- struct smd_channel_info_word *tx_info_word;
- struct smd_channel_info_word *rx_info_word;
+ struct smd_channel_info_pair *info;
+ struct smd_channel_info_word_pair *info_word;
struct mutex tx_lock;
wait_queue_head_t fblockread_event;
@@ -215,7 +212,7 @@ struct qcom_smd {
* Format of the smd_info smem items, for byte aligned channels.
*/
struct smd_channel_info {
- u32 state;
+ __le32 state;
u8 fDSR;
u8 fCTS;
u8 fCD;
@@ -224,46 +221,104 @@ struct smd_channel_info {
u8 fTAIL;
u8 fSTATE;
u8 fBLOCKREADINTR;
- u32 tail;
- u32 head;
+ __le32 tail;
+ __le32 head;
+};
+
+struct smd_channel_info_pair {
+ struct smd_channel_info tx;
+ struct smd_channel_info rx;
};
/*
* Format of the smd_info smem items, for word aligned channels.
*/
struct smd_channel_info_word {
- u32 state;
- u32 fDSR;
- u32 fCTS;
- u32 fCD;
- u32 fRI;
- u32 fHEAD;
- u32 fTAIL;
- u32 fSTATE;
- u32 fBLOCKREADINTR;
- u32 tail;
- u32 head;
+ __le32 state;
+ __le32 fDSR;
+ __le32 fCTS;
+ __le32 fCD;
+ __le32 fRI;
+ __le32 fHEAD;
+ __le32 fTAIL;
+ __le32 fSTATE;
+ __le32 fBLOCKREADINTR;
+ __le32 tail;
+ __le32 head;
};
-#define GET_RX_CHANNEL_INFO(channel, param) \
- (channel->rx_info_word ? \
- channel->rx_info_word->param : \
- channel->rx_info->param)
-
-#define SET_RX_CHANNEL_INFO(channel, param, value) \
- (channel->rx_info_word ? \
- (channel->rx_info_word->param = value) : \
- (channel->rx_info->param = value))
-
-#define GET_TX_CHANNEL_INFO(channel, param) \
- (channel->tx_info_word ? \
- channel->tx_info_word->param : \
- channel->tx_info->param)
+struct smd_channel_info_word_pair {
+ struct smd_channel_info_word tx;
+ struct smd_channel_info_word rx;
+};
-#define SET_TX_CHANNEL_INFO(channel, param, value) \
- (channel->tx_info_word ? \
- (channel->tx_info_word->param = value) : \
- (channel->tx_info->param = value))
+#define GET_RX_CHANNEL_FLAG(channel, param) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
+ channel->info_word ? \
+ le32_to_cpu(channel->info_word->rx.param) : \
+ channel->info->rx.param; \
+ })
+
+#define GET_RX_CHANNEL_INFO(channel, param) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
+ le32_to_cpu(channel->info_word ? \
+ channel->info_word->rx.param : \
+ channel->info->rx.param); \
+ })
+
+#define SET_RX_CHANNEL_FLAG(channel, param, value) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
+ if (channel->info_word) \
+ channel->info_word->rx.param = cpu_to_le32(value); \
+ else \
+ channel->info->rx.param = value; \
+ })
+
+#define SET_RX_CHANNEL_INFO(channel, param, value) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
+ if (channel->info_word) \
+ channel->info_word->rx.param = cpu_to_le32(value); \
+ else \
+ channel->info->rx.param = cpu_to_le32(value); \
+ })
+
+#define GET_TX_CHANNEL_FLAG(channel, param) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
+ channel->info_word ? \
+ le32_to_cpu(channel->info_word->tx.param) : \
+ channel->info->tx.param; \
+ })
+
+#define GET_TX_CHANNEL_INFO(channel, param) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
+ le32_to_cpu(channel->info_word ? \
+ channel->info_word->tx.param : \
+ channel->info->tx.param); \
+ })
+
+#define SET_TX_CHANNEL_FLAG(channel, param, value) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
+ if (channel->info_word) \
+ channel->info_word->tx.param = cpu_to_le32(value); \
+ else \
+ channel->info->tx.param = value; \
+ })
+
+#define SET_TX_CHANNEL_INFO(channel, param, value) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
+ if (channel->info_word) \
+ channel->info_word->tx.param = cpu_to_le32(value); \
+ else \
+ channel->info->tx.param = cpu_to_le32(value); \
+ })
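
For a single field, the new accessors behave roughly as follows (an illustrative expansion, not literal preprocessor output); the BUILD_BUG_ON()s guarantee the FLAG variants touch only the u8 fields of the byte-aligned layout and the INFO variants only the 32-bit fields, so no endian conversion is ever applied at the wrong width:

	/* SET_TX_CHANNEL_FLAG(channel, fHEAD, 1) is equivalent to: */
	if (channel->info_word)
		channel->info_word->tx.fHEAD = cpu_to_le32(1);
	else
		channel->info->tx.fHEAD = 1;	/* u8 flag: no byte swap */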
/**
* struct qcom_smd_alloc_entry - channel allocation entry
@@ -274,9 +329,9 @@ struct smd_channel_info_word {
*/
struct qcom_smd_alloc_entry {
u8 name[20];
- u32 cid;
- u32 flags;
- u32 ref_count;
+ __le32 cid;
+ __le32 flags;
+ __le32 ref_count;
} __packed;
#define SMD_CHANNEL_FLAGS_EDGE_MASK 0xff
@@ -305,14 +360,14 @@ static void qcom_smd_signal_channel(struct qcom_smd_channel *channel)
static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
{
SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
- SET_TX_CHANNEL_INFO(channel, fDSR, 0);
- SET_TX_CHANNEL_INFO(channel, fCTS, 0);
- SET_TX_CHANNEL_INFO(channel, fCD, 0);
- SET_TX_CHANNEL_INFO(channel, fRI, 0);
- SET_TX_CHANNEL_INFO(channel, fHEAD, 0);
- SET_TX_CHANNEL_INFO(channel, fTAIL, 0);
- SET_TX_CHANNEL_INFO(channel, fSTATE, 1);
- SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1);
+ SET_TX_CHANNEL_FLAG(channel, fDSR, 0);
+ SET_TX_CHANNEL_FLAG(channel, fCTS, 0);
+ SET_TX_CHANNEL_FLAG(channel, fCD, 0);
+ SET_TX_CHANNEL_FLAG(channel, fRI, 0);
+ SET_TX_CHANNEL_FLAG(channel, fHEAD, 0);
+ SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);
+ SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);
+ SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
SET_TX_CHANNEL_INFO(channel, head, 0);
SET_TX_CHANNEL_INFO(channel, tail, 0);
@@ -350,12 +405,12 @@ static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state);
- SET_TX_CHANNEL_INFO(channel, fDSR, is_open);
- SET_TX_CHANNEL_INFO(channel, fCTS, is_open);
- SET_TX_CHANNEL_INFO(channel, fCD, is_open);
+ SET_TX_CHANNEL_FLAG(channel, fDSR, is_open);
+ SET_TX_CHANNEL_FLAG(channel, fCTS, is_open);
+ SET_TX_CHANNEL_FLAG(channel, fCD, is_open);
SET_TX_CHANNEL_INFO(channel, state, state);
- SET_TX_CHANNEL_INFO(channel, fSTATE, 1);
+ SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);
channel->state = state;
qcom_smd_signal_channel(channel);
@@ -364,20 +419,15 @@ static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
/*
* Copy count bytes of data using 32bit accesses, if that's required.
*/
-static void smd_copy_to_fifo(void __iomem *_dst,
- const void *_src,
+static void smd_copy_to_fifo(void __iomem *dst,
+ const void *src,
size_t count,
bool word_aligned)
{
- u32 *dst = (u32 *)_dst;
- u32 *src = (u32 *)_src;
-
if (word_aligned) {
- count /= sizeof(u32);
- while (count--)
- writel_relaxed(*src++, dst++);
+ __iowrite32_copy(dst, src, count / sizeof(u32));
} else {
- memcpy_toio(_dst, _src, count);
+ memcpy_toio(dst, src, count);
}
}
@@ -395,7 +445,7 @@ static void smd_copy_from_fifo(void *_dst,
if (word_aligned) {
count /= sizeof(u32);
while (count--)
- *dst++ = readl_relaxed(src++);
+ *dst++ = __raw_readl(src++);
} else {
memcpy_fromio(_dst, _src, count);
}
@@ -412,7 +462,7 @@ static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel,
unsigned tail;
size_t len;
- word_aligned = channel->rx_info_word != NULL;
+ word_aligned = channel->info_word;
tail = GET_RX_CHANNEL_INFO(channel, tail);
len = min_t(size_t, count, channel->fifo_size - tail);
@@ -491,7 +541,7 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
{
bool need_state_scan = false;
int remote_state;
- u32 pktlen;
+ __le32 pktlen;
int avail;
int ret;
@@ -502,10 +552,10 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
need_state_scan = true;
}
/* Indicate that we have seen any state change */
- SET_RX_CHANNEL_INFO(channel, fSTATE, 0);
+ SET_RX_CHANNEL_FLAG(channel, fSTATE, 0);
/* Signal waiting qcom_smd_send() about the interrupt */
- if (!GET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR))
+ if (!GET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR))
wake_up_interruptible(&channel->fblockread_event);
/* Don't consume any data until we've opened the channel */
@@ -513,7 +563,7 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
goto out;
/* Indicate that we've seen the new data */
- SET_RX_CHANNEL_INFO(channel, fHEAD, 0);
+ SET_RX_CHANNEL_FLAG(channel, fHEAD, 0);
/* Consume data */
for (;;) {
@@ -522,7 +572,7 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) {
qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen));
qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN);
- channel->pkt_size = pktlen;
+ channel->pkt_size = le32_to_cpu(pktlen);
} else if (channel->pkt_size && avail >= channel->pkt_size) {
ret = qcom_smd_channel_recv_single(channel);
if (ret)
@@ -533,10 +583,10 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
}
/* Indicate that we have seen and updated tail */
- SET_RX_CHANNEL_INFO(channel, fTAIL, 1);
+ SET_RX_CHANNEL_FLAG(channel, fTAIL, 1);
/* Signal the remote that we've consumed the data (if requested) */
- if (!GET_RX_CHANNEL_INFO(channel, fBLOCKREADINTR)) {
+ if (!GET_RX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) {
/* Ensure ordering of channel info updates */
wmb();
@@ -627,7 +677,7 @@ static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
unsigned head;
size_t len;
- word_aligned = channel->tx_info_word != NULL;
+ word_aligned = channel->info_word;
head = GET_TX_CHANNEL_INFO(channel, head);
len = min_t(size_t, count, channel->fifo_size - head);
@@ -665,12 +715,16 @@ static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
*/
int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
{
- u32 hdr[5] = {len,};
+ __le32 hdr[5] = { cpu_to_le32(len), };
int tlen = sizeof(hdr) + len;
int ret;
/* Word aligned channels only accept word size aligned data */
- if (channel->rx_info_word != NULL && len % 4)
+ if (channel->info_word && len % 4)
+ return -EINVAL;
+
+ /* Reject packets that are too big */
+ if (tlen >= channel->fifo_size)
return -EINVAL;
ret = mutex_lock_interruptible(&channel->tx_lock);
@@ -683,7 +737,7 @@ int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
goto out;
}
- SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0);
+ SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 0);
ret = wait_event_interruptible(channel->fblockread_event,
qcom_smd_get_tx_avail(channel) >= tlen ||
@@ -691,15 +745,15 @@ int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
if (ret)
goto out;
- SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1);
+ SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
}
- SET_TX_CHANNEL_INFO(channel, fTAIL, 0);
+ SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);
qcom_smd_write_fifo(channel, hdr, sizeof(hdr));
qcom_smd_write_fifo(channel, data, len);
- SET_TX_CHANNEL_INFO(channel, fHEAD, 1);
+ SET_TX_CHANNEL_FLAG(channel, fHEAD, 1);
/* Ensure ordering of channel info updates */
wmb();
@@ -727,6 +781,19 @@ static struct qcom_smd_driver *to_smd_driver(struct device *dev)
static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv)
{
+ struct qcom_smd_device *qsdev = to_smd_device(dev);
+ struct qcom_smd_driver *qsdrv = container_of(drv, struct qcom_smd_driver, driver);
+ const struct qcom_smd_id *match = qsdrv->smd_match_table;
+ const char *name = qsdev->channel->name;
+
+ if (match) {
+ while (match->name[0]) {
+ if (!strcmp(match->name, name))
+ return 1;
+ match++;
+ }
+ }
+
return of_driver_match_device(dev, drv);
}
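
The name-based matching added above walks a driver-supplied table until it reaches an entry with an empty name. A hypothetical table (the channel name is only an example):

	static const struct qcom_smd_id my_smd_match[] = {
		{ .name = "rpm_requests" },
		{ /* empty name ends the while (match->name[0]) loop */ }
	};

Pointing the .smd_match_table field of a qcom_smd_driver at such a table lets the bus bind the driver by channel name even when no devicetree node matches.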
@@ -854,10 +921,8 @@ static struct device_node *qcom_smd_match_channel(struct device_node *edge_node,
for_each_available_child_of_node(edge_node, child) {
key = "qcom,smd-channels";
ret = of_property_read_string(child, key, &name);
- if (ret) {
- of_node_put(child);
+ if (ret)
continue;
- }
if (strcmp(name, channel) == 0)
return child;
@@ -880,19 +945,17 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
if (channel->qsdev)
return -EEXIST;
- node = qcom_smd_match_channel(edge->of_node, channel->name);
- if (!node) {
- dev_dbg(smd->dev, "no match for '%s'\n", channel->name);
- return -ENXIO;
- }
-
dev_dbg(smd->dev, "registering '%s'\n", channel->name);
qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
if (!qsdev)
return -ENOMEM;
- dev_set_name(&qsdev->dev, "%s.%s", edge->of_node->name, node->name);
+ node = qcom_smd_match_channel(edge->of_node, channel->name);
+ dev_set_name(&qsdev->dev, "%s.%s",
+ edge->of_node->name,
+ node ? node->name : channel->name);
+
qsdev->dev.parent = smd->dev;
qsdev->dev.bus = &qcom_smd_bus;
qsdev->dev.release = qcom_smd_release_device;
@@ -978,21 +1041,20 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
spin_lock_init(&channel->recv_lock);
init_waitqueue_head(&channel->fblockread_event);
- ret = qcom_smem_get(edge->remote_pid, smem_info_item, (void **)&info,
- &info_size);
- if (ret)
+ info = qcom_smem_get(edge->remote_pid, smem_info_item, &info_size);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto free_name_and_channel;
+ }
/*
* Use the size of the item to figure out which channel info struct to
* use.
*/
if (info_size == 2 * sizeof(struct smd_channel_info_word)) {
- channel->tx_info_word = info;
- channel->rx_info_word = info + sizeof(struct smd_channel_info_word);
+ channel->info_word = info;
} else if (info_size == 2 * sizeof(struct smd_channel_info)) {
- channel->tx_info = info;
- channel->rx_info = info + sizeof(struct smd_channel_info);
+ channel->info = info;
} else {
dev_err(smd->dev,
"channel info of size %zu not supported\n", info_size);
@@ -1000,10 +1062,11 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
goto free_name_and_channel;
}
- ret = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_base,
- &fifo_size);
- if (ret)
+ fifo_base = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_size);
+ if (IS_ERR(fifo_base)) {
+ ret = PTR_ERR(fifo_base);
goto free_name_and_channel;
+ }
/* The channel consists of an rx and tx fifo of equal size */
fifo_size /= 2;
@@ -1040,20 +1103,19 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge)
unsigned long flags;
unsigned fifo_id;
unsigned info_id;
- int ret;
int tbl;
int i;
+ u32 eflags, cid;
for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
- ret = qcom_smem_get(edge->remote_pid,
- smem_items[tbl].alloc_tbl_id,
- (void **)&alloc_tbl,
- NULL);
- if (ret < 0)
+ alloc_tbl = qcom_smem_get(edge->remote_pid,
+ smem_items[tbl].alloc_tbl_id, NULL);
+ if (IS_ERR(alloc_tbl))
continue;
for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) {
entry = &alloc_tbl[i];
+ eflags = le32_to_cpu(entry->flags);
if (test_bit(i, edge->allocated[tbl]))
continue;
@@ -1063,14 +1125,15 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge)
if (!entry->name[0])
continue;
- if (!(entry->flags & SMD_CHANNEL_FLAGS_PACKET))
+ if (!(eflags & SMD_CHANNEL_FLAGS_PACKET))
continue;
- if ((entry->flags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id)
+ if ((eflags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id)
continue;
- info_id = smem_items[tbl].info_base_id + entry->cid;
- fifo_id = smem_items[tbl].fifo_base_id + entry->cid;
+ cid = le32_to_cpu(entry->cid);
+ info_id = smem_items[tbl].info_base_id + cid;
+ fifo_id = smem_items[tbl].fifo_base_id + cid;
channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name);
if (IS_ERR(channel))
@@ -1227,11 +1290,12 @@ static int qcom_smd_probe(struct platform_device *pdev)
int num_edges;
int ret;
int i = 0;
+ void *p;
/* Wait for smem */
- ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL, NULL);
- if (ret == -EPROBE_DEFER)
- return ret;
+ p = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL);
+ if (PTR_ERR(p) == -EPROBE_DEFER)
+ return PTR_ERR(p);
num_edges = of_get_available_child_count(pdev->dev.of_node);
array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge);
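
For reference, every qcom_smd_send() frames its payload with a five-word header whose first word carries the payload length, which is what the new size check reasons about. A worked sketch with hypothetical numbers: for len = 44, tlen = sizeof(hdr) + len = 20 + 44 = 64 bytes, so with a 1024-byte TX FIFO the send proceeds; any tlen >= fifo_size now fails fast with -EINVAL instead of blocking forever on space that can never become available.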
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 5236518..19019aa 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -92,9 +92,9 @@
* @params: parameters to the command
*/
struct smem_proc_comm {
- u32 command;
- u32 status;
- u32 params[2];
+ __le32 command;
+ __le32 status;
+ __le32 params[2];
};
/**
@@ -106,10 +106,10 @@ struct smem_proc_comm {
* the default region. bits 0,1 are reserved
*/
struct smem_global_entry {
- u32 allocated;
- u32 offset;
- u32 size;
- u32 aux_base; /* bits 1:0 reserved */
+ __le32 allocated;
+ __le32 offset;
+ __le32 size;
+ __le32 aux_base; /* bits 1:0 reserved */
};
#define AUX_BASE_MASK 0xfffffffc
@@ -125,11 +125,11 @@ struct smem_global_entry {
*/
struct smem_header {
struct smem_proc_comm proc_comm[4];
- u32 version[32];
- u32 initialized;
- u32 free_offset;
- u32 available;
- u32 reserved;
+ __le32 version[32];
+ __le32 initialized;
+ __le32 free_offset;
+ __le32 available;
+ __le32 reserved;
struct smem_global_entry toc[SMEM_ITEM_COUNT];
};
@@ -143,12 +143,12 @@ struct smem_header {
* @reserved: reserved entries for later use
*/
struct smem_ptable_entry {
- u32 offset;
- u32 size;
- u32 flags;
- u16 host0;
- u16 host1;
- u32 reserved[8];
+ __le32 offset;
+ __le32 size;
+ __le32 flags;
+ __le16 host0;
+ __le16 host1;
+ __le32 reserved[8];
};
/**
@@ -160,13 +160,14 @@ struct smem_ptable_entry {
* @entry: list of @smem_ptable_entry for the @num_entries partitions
*/
struct smem_ptable {
- u32 magic;
- u32 version;
- u32 num_entries;
- u32 reserved[5];
+ u8 magic[4];
+ __le32 version;
+ __le32 num_entries;
+ __le32 reserved[5];
struct smem_ptable_entry entry[];
};
-#define SMEM_PTABLE_MAGIC 0x434f5424 /* "$TOC" */
+
+static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */
/**
* struct smem_partition_header - header of the partitions
@@ -181,15 +182,16 @@ struct smem_ptable {
* @reserved: for now reserved entries
*/
struct smem_partition_header {
- u32 magic;
- u16 host0;
- u16 host1;
- u32 size;
- u32 offset_free_uncached;
- u32 offset_free_cached;
- u32 reserved[3];
+ u8 magic[4];
+ __le16 host0;
+ __le16 host1;
+ __le32 size;
+ __le32 offset_free_uncached;
+ __le32 offset_free_cached;
+ __le32 reserved[3];
};
-#define SMEM_PART_MAGIC 0x54525024 /* "$PRT" */
+
+static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */
/**
* struct smem_private_entry - header of each item in the private partition
@@ -201,12 +203,12 @@ struct smem_partition_header {
* @reserved: for now reserved entry
*/
struct smem_private_entry {
- u16 canary;
- u16 item;
- u32 size; /* includes padding bytes */
- u16 padding_data;
- u16 padding_hdr;
- u32 reserved;
+ u16 canary; /* bytes are the same so no swapping needed */
+ __le16 item;
+ __le32 size; /* includes padding bytes */
+ __le16 padding_data;
+ __le16 padding_hdr;
+ __le32 reserved;
};
#define SMEM_PRIVATE_CANARY 0xa5a5
@@ -242,6 +244,45 @@ struct qcom_smem {
struct smem_region regions[0];
};
+static struct smem_private_entry *
+phdr_to_last_private_entry(struct smem_partition_header *phdr)
+{
+ void *p = phdr;
+
+ return p + le32_to_cpu(phdr->offset_free_uncached);
+}
+
+static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr)
+{
+ void *p = phdr;
+
+ return p + le32_to_cpu(phdr->offset_free_cached);
+}
+
+static struct smem_private_entry *
+phdr_to_first_private_entry(struct smem_partition_header *phdr)
+{
+ void *p = phdr;
+
+ return p + sizeof(*phdr);
+}
+
+static struct smem_private_entry *
+private_entry_next(struct smem_private_entry *e)
+{
+ void *p = e;
+
+ return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
+ le32_to_cpu(e->size);
+}
+
+static void *entry_to_item(struct smem_private_entry *e)
+{
+ void *p = e;
+
+ return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
+}
+
/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;
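
The helpers added above encode the private-partition layout. A sketch of the memory map they assume:

	/*
	 *  phdr -> +---------------------------+
	 *          | smem_partition_header     |
	 *          +---------------------------+ <- phdr_to_first_private_entry()
	 *          | smem_private_entry        |
	 *          | [padding_hdr bytes]       | <- entry_to_item()
	 *          | item data ("size" bytes,  |
	 *          |  incl. padding_data)      |
	 *          +---------------------------+ <- private_entry_next()
	 *          | ... more entries ...      |
	 *          +---------------------------+ <- offset_free_uncached
	 *          | free space                |
	 *          +---------------------------+ <- offset_free_cached
	 */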
@@ -254,16 +295,16 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
size_t size)
{
struct smem_partition_header *phdr;
- struct smem_private_entry *hdr;
+ struct smem_private_entry *hdr, *end;
size_t alloc_size;
- void *p;
+ void *cached;
phdr = smem->partitions[host];
+ hdr = phdr_to_first_private_entry(phdr);
+ end = phdr_to_last_private_entry(phdr);
+ cached = phdr_to_first_cached_entry(phdr);
- p = (void *)phdr + sizeof(*phdr);
- while (p < (void *)phdr + phdr->offset_free_uncached) {
- hdr = p;
-
+ while (hdr < end) {
if (hdr->canary != SMEM_PRIVATE_CANARY) {
dev_err(smem->dev,
"Found invalid canary in host %d partition\n",
@@ -271,24 +312,23 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
return -EINVAL;
}
- if (hdr->item == item)
+ if (le16_to_cpu(hdr->item) == item)
return -EEXIST;
- p += sizeof(*hdr) + hdr->padding_hdr + hdr->size;
+ hdr = private_entry_next(hdr);
}
/* Check that we don't grow into the cached region */
alloc_size = sizeof(*hdr) + ALIGN(size, 8);
- if (p + alloc_size >= (void *)phdr + phdr->offset_free_cached) {
+ if ((void *)hdr + alloc_size >= cached) {
dev_err(smem->dev, "Out of memory\n");
return -ENOSPC;
}
- hdr = p;
hdr->canary = SMEM_PRIVATE_CANARY;
- hdr->item = item;
- hdr->size = ALIGN(size, 8);
- hdr->padding_data = hdr->size - size;
+ hdr->item = cpu_to_le16(item);
+ hdr->size = cpu_to_le32(ALIGN(size, 8));
+ hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
hdr->padding_hdr = 0;
/*
@@ -297,7 +337,7 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
* gets a consistent view of the linked list.
*/
wmb();
- phdr->offset_free_uncached += alloc_size;
+ le32_add_cpu(&phdr->offset_free_uncached, alloc_size);
return 0;
}
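
A worked example of the little-endian bookkeeping above, with hypothetical numbers: allocating a 10-byte item stores hdr->size = cpu_to_le32(ALIGN(10, 8)) = 16 and hdr->padding_data = cpu_to_le16(16 - 10) = 6, then advances offset_free_uncached by sizeof(*hdr) + 16 via le32_add_cpu().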
@@ -318,11 +358,11 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
return -EEXIST;
size = ALIGN(size, 8);
- if (WARN_ON(size > header->available))
+ if (WARN_ON(size > le32_to_cpu(header->available)))
return -ENOMEM;
entry->offset = header->free_offset;
- entry->size = size;
+ entry->size = cpu_to_le32(size);
/*
* Ensure the header is consistent before we mark the item allocated,
@@ -330,10 +370,10 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
* even though they do not take the spinlock on read.
*/
wmb();
- entry->allocated = 1;
+ entry->allocated = cpu_to_le32(1);
- header->free_offset += size;
- header->available -= size;
+ le32_add_cpu(&header->free_offset, size);
+ le32_add_cpu(&header->available, -size);
return 0;
}
@@ -378,10 +418,9 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
}
EXPORT_SYMBOL(qcom_smem_alloc);
-static int qcom_smem_get_global(struct qcom_smem *smem,
- unsigned item,
- void **ptr,
- size_t *size)
+static void *qcom_smem_get_global(struct qcom_smem *smem,
+ unsigned item,
+ size_t *size)
{
struct smem_header *header;
struct smem_region *area;
@@ -390,100 +429,94 @@ static int qcom_smem_get_global(struct qcom_smem *smem,
unsigned i;
if (WARN_ON(item >= SMEM_ITEM_COUNT))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
header = smem->regions[0].virt_base;
entry = &header->toc[item];
if (!entry->allocated)
- return -ENXIO;
+ return ERR_PTR(-ENXIO);
- if (ptr != NULL) {
- aux_base = entry->aux_base & AUX_BASE_MASK;
+ aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;
- for (i = 0; i < smem->num_regions; i++) {
- area = &smem->regions[i];
+ for (i = 0; i < smem->num_regions; i++) {
+ area = &smem->regions[i];
- if (area->aux_base == aux_base || !aux_base) {
- *ptr = area->virt_base + entry->offset;
- break;
- }
+ if (area->aux_base == aux_base || !aux_base) {
+ if (size != NULL)
+ *size = le32_to_cpu(entry->size);
+ return area->virt_base + le32_to_cpu(entry->offset);
}
}
- if (size != NULL)
- *size = entry->size;
- return 0;
+ return ERR_PTR(-ENOENT);
}
-static int qcom_smem_get_private(struct qcom_smem *smem,
- unsigned host,
- unsigned item,
- void **ptr,
- size_t *size)
+static void *qcom_smem_get_private(struct qcom_smem *smem,
+ unsigned host,
+ unsigned item,
+ size_t *size)
{
struct smem_partition_header *phdr;
- struct smem_private_entry *hdr;
- void *p;
+ struct smem_private_entry *e, *end;
phdr = smem->partitions[host];
+ e = phdr_to_first_private_entry(phdr);
+ end = phdr_to_last_private_entry(phdr);
- p = (void *)phdr + sizeof(*phdr);
- while (p < (void *)phdr + phdr->offset_free_uncached) {
- hdr = p;
-
- if (hdr->canary != SMEM_PRIVATE_CANARY) {
+ while (e < end) {
+ if (e->canary != SMEM_PRIVATE_CANARY) {
dev_err(smem->dev,
"Found invalid canary in host %d partition\n",
host);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
- if (hdr->item == item) {
- if (ptr != NULL)
- *ptr = p + sizeof(*hdr) + hdr->padding_hdr;
-
+ if (le16_to_cpu(e->item) == item) {
if (size != NULL)
- *size = hdr->size - hdr->padding_data;
+ *size = le32_to_cpu(e->size) -
+ le16_to_cpu(e->padding_data);
- return 0;
+ return entry_to_item(e);
}
- p += sizeof(*hdr) + hdr->padding_hdr + hdr->size;
+ e = private_entry_next(e);
}
- return -ENOENT;
+ return ERR_PTR(-ENOENT);
}
/**
* qcom_smem_get() - resolve ptr and size of a smem item
* @host: the remote processor, or -1
* @item: smem item handle
- * @ptr: pointer to be filled out with address of the item
* @size: pointer to be filled out with size of the item
*
- * Looks up pointer and size of a smem item.
+ * Looks up smem item and returns pointer to it. Size of smem
+ * item is returned in @size.
*/
-int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size)
+void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
unsigned long flags;
int ret;
+ void *ptr = ERR_PTR(-EPROBE_DEFER);
if (!__smem)
- return -EPROBE_DEFER;
+ return ptr;
ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
HWSPINLOCK_TIMEOUT,
&flags);
if (ret)
- return ret;
+ return ERR_PTR(ret);
if (host < SMEM_HOST_COUNT && __smem->partitions[host])
- ret = qcom_smem_get_private(__smem, host, item, ptr, size);
+ ptr = qcom_smem_get_private(__smem, host, item, size);
else
- ret = qcom_smem_get_global(__smem, item, ptr, size);
+ ptr = qcom_smem_get_global(__smem, item, size);
hwspin_unlock_irqrestore(__smem->hwlock, &flags);
- return ret;
+
+ return ptr;
}
EXPORT_SYMBOL(qcom_smem_get);
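
Callers of the reworked qcom_smem_get() receive the mapped pointer directly and must test it with IS_ERR(). A minimal sketch (remote_pid and MY_SMEM_ITEM are placeholders):

	size_t size;
	void *item;

	item = qcom_smem_get(remote_pid, MY_SMEM_ITEM, &size);
	if (IS_ERR(item))
		return PTR_ERR(item);	/* may be -EPROBE_DEFER before smem probes */
	/* item points into shared memory; size holds its byte length */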
@@ -506,10 +539,11 @@ int qcom_smem_get_free_space(unsigned host)
if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
phdr = __smem->partitions[host];
- ret = phdr->offset_free_cached - phdr->offset_free_uncached;
+ ret = le32_to_cpu(phdr->offset_free_cached) -
+ le32_to_cpu(phdr->offset_free_uncached);
} else {
header = __smem->regions[0].virt_base;
- ret = header->available;
+ ret = le32_to_cpu(header->available);
}
return ret;
@@ -518,13 +552,11 @@ EXPORT_SYMBOL(qcom_smem_get_free_space);
static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
- unsigned *versions;
+ __le32 *versions;
size_t size;
- int ret;
- ret = qcom_smem_get_global(smem, SMEM_ITEM_VERSION,
- (void **)&versions, &size);
- if (ret < 0) {
+ versions = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, &size);
+ if (IS_ERR(versions)) {
dev_err(smem->dev, "Unable to read the version item\n");
return -ENOENT;
}
@@ -534,7 +566,7 @@ static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
return -EINVAL;
}
- return versions[SMEM_MASTER_SBL_VERSION_INDEX];
+ return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
}
static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
@@ -544,35 +576,38 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
struct smem_ptable_entry *entry;
struct smem_ptable *ptable;
unsigned remote_host;
+ u32 version, host0, host1;
int i;
ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
- if (ptable->magic != SMEM_PTABLE_MAGIC)
+ if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
return 0;
- if (ptable->version != 1) {
+ version = le32_to_cpu(ptable->version);
+ if (version != 1) {
dev_err(smem->dev,
- "Unsupported partition header version %d\n",
- ptable->version);
+ "Unsupported partition header version %d\n", version);
return -EINVAL;
}
- for (i = 0; i < ptable->num_entries; i++) {
+ for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
entry = &ptable->entry[i];
+ host0 = le16_to_cpu(entry->host0);
+ host1 = le16_to_cpu(entry->host1);
- if (entry->host0 != local_host && entry->host1 != local_host)
+ if (host0 != local_host && host1 != local_host)
continue;
- if (!entry->offset)
+ if (!le32_to_cpu(entry->offset))
continue;
- if (!entry->size)
+ if (!le32_to_cpu(entry->size))
continue;
- if (entry->host0 == local_host)
- remote_host = entry->host1;
+ if (host0 == local_host)
+ remote_host = host1;
else
- remote_host = entry->host0;
+ remote_host = host0;
if (remote_host >= SMEM_HOST_COUNT) {
dev_err(smem->dev,
@@ -588,21 +623,24 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
return -EINVAL;
}
- header = smem->regions[0].virt_base + entry->offset;
+ header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+ host0 = le16_to_cpu(header->host0);
+ host1 = le16_to_cpu(header->host1);
- if (header->magic != SMEM_PART_MAGIC) {
+ if (memcmp(header->magic, SMEM_PART_MAGIC,
+ sizeof(header->magic))) {
dev_err(smem->dev,
"Partition %d has invalid magic\n", i);
return -EINVAL;
}
- if (header->host0 != local_host && header->host1 != local_host) {
+ if (host0 != local_host && host1 != local_host) {
dev_err(smem->dev,
"Partition %d hosts are invalid\n", i);
return -EINVAL;
}
- if (header->host0 != remote_host && header->host1 != remote_host) {
+ if (host0 != remote_host && host1 != remote_host) {
dev_err(smem->dev,
"Partition %d hosts are invalid\n", i);
return -EINVAL;
@@ -614,7 +652,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
return -EINVAL;
}
- if (header->offset_free_uncached > header->size) {
+ if (le32_to_cpu(header->offset_free_uncached) > le32_to_cpu(header->size)) {
dev_err(smem->dev,
"Partition %d has invalid free pointer\n", i);
return -EINVAL;
@@ -626,37 +664,47 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
return 0;
}
-static int qcom_smem_count_mem_regions(struct platform_device *pdev)
+static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
+ const char *name, int i)
{
- struct resource *res;
- int num_regions = 0;
- int i;
-
- for (i = 0; i < pdev->num_resources; i++) {
- res = &pdev->resource[i];
+ struct device_node *np;
+ struct resource r;
+ int ret;
- if (resource_type(res) == IORESOURCE_MEM)
- num_regions++;
+ np = of_parse_phandle(dev->of_node, name, 0);
+ if (!np) {
+ dev_err(dev, "No %s specified\n", name);
+ return -EINVAL;
}
- return num_regions;
+ ret = of_address_to_resource(np, 0, &r);
+ of_node_put(np);
+ if (ret)
+ return ret;
+
+ smem->regions[i].aux_base = (u32)r.start;
+ smem->regions[i].size = resource_size(&r);
+ smem->regions[i].virt_base = devm_ioremap_nocache(dev, r.start,
+ resource_size(&r));
+ if (!smem->regions[i].virt_base)
+ return -ENOMEM;
+
+ return 0;
}
static int qcom_smem_probe(struct platform_device *pdev)
{
struct smem_header *header;
- struct device_node *np;
struct qcom_smem *smem;
- struct resource *res;
- struct resource r;
size_t array_size;
- int num_regions = 0;
+ int num_regions;
int hwlock_id;
u32 version;
int ret;
- int i;
- num_regions = qcom_smem_count_mem_regions(pdev) + 1;
+ num_regions = 1;
+ if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
+ num_regions++;
array_size = num_regions * sizeof(struct smem_region);
smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
@@ -666,39 +714,17 @@ static int qcom_smem_probe(struct platform_device *pdev)
smem->dev = &pdev->dev;
smem->num_regions = num_regions;
- np = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
- if (!np) {
- dev_err(&pdev->dev, "No memory-region specified\n");
- return -EINVAL;
- }
-
- ret = of_address_to_resource(np, 0, &r);
- of_node_put(np);
+ ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
if (ret)
return ret;
- smem->regions[0].aux_base = (u32)r.start;
- smem->regions[0].size = resource_size(&r);
- smem->regions[0].virt_base = devm_ioremap_nocache(&pdev->dev,
- r.start,
- resource_size(&r));
- if (!smem->regions[0].virt_base)
- return -ENOMEM;
-
- for (i = 1; i < num_regions; i++) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i - 1);
-
- smem->regions[i].aux_base = (u32)res->start;
- smem->regions[i].size = resource_size(res);
- smem->regions[i].virt_base = devm_ioremap_nocache(&pdev->dev,
- res->start,
- resource_size(res));
- if (!smem->regions[i].virt_base)
- return -ENOMEM;
- }
+ if (num_regions > 1 && (ret = qcom_smem_map_memory(smem, &pdev->dev,
+ "qcom,rpm-msg-ram", 1)))
+ return ret;
header = smem->regions[0].virt_base;
- if (header->initialized != 1 || header->reserved) {
+ if (le32_to_cpu(header->initialized) != 1 ||
+ le32_to_cpu(header->reserved)) {
dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
return -EINVAL;
}
@@ -730,8 +756,8 @@ static int qcom_smem_probe(struct platform_device *pdev)
static int qcom_smem_remove(struct platform_device *pdev)
{
- __smem = NULL;
hwspin_lock_free(__smem->hwlock);
+ __smem = NULL;
return 0;
}
diff --git a/drivers/soc/rockchip/Kconfig b/drivers/soc/rockchip/Kconfig
new file mode 100644
index 0000000..7140ff8
--- /dev/null
+++ b/drivers/soc/rockchip/Kconfig
@@ -0,0 +1,18 @@
+if ARCH_ROCKCHIP || COMPILE_TEST
+
+#
+# Rockchip SoC drivers
+#
+config ROCKCHIP_PM_DOMAINS
+ bool "Rockchip generic power domain"
+ depends on PM
+ select PM_GENERIC_DOMAINS
+ help
+ Say y here to enable power domain support.
+ In order to meet high performance and low power requirements, a power
+ management unit is designed for saving power when the RK3288 is in low
+ power mode. The RK3288 PMU is dedicated to managing the power of the
+ whole chip.
+
+ If unsure, say N.
+
+endif
diff --git a/drivers/soc/rockchip/Makefile b/drivers/soc/rockchip/Makefile
new file mode 100644
index 0000000..3d73d06
--- /dev/null
+++ b/drivers/soc/rockchip/Makefile
@@ -0,0 +1,4 @@
+#
+# Rockchip SoC drivers
+#
+obj-$(CONFIG_ROCKCHIP_PM_DOMAINS) += pm_domains.o
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
new file mode 100644
index 0000000..534c589
--- /dev/null
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -0,0 +1,490 @@
+/*
+ * Rockchip Generic power domain support.
+ *
+ * Copyright (c) 2015 ROCKCHIP, Co. Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_domain.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <dt-bindings/power/rk3288-power.h>
+
+struct rockchip_domain_info {
+ int pwr_mask;
+ int status_mask;
+ int req_mask;
+ int idle_mask;
+ int ack_mask;
+};
+
+struct rockchip_pmu_info {
+ u32 pwr_offset;
+ u32 status_offset;
+ u32 req_offset;
+ u32 idle_offset;
+ u32 ack_offset;
+
+ u32 core_pwrcnt_offset;
+ u32 gpu_pwrcnt_offset;
+
+ unsigned int core_power_transition_time;
+ unsigned int gpu_power_transition_time;
+
+ int num_domains;
+ const struct rockchip_domain_info *domain_info;
+};
+
+struct rockchip_pm_domain {
+ struct generic_pm_domain genpd;
+ const struct rockchip_domain_info *info;
+ struct rockchip_pmu *pmu;
+ int num_clks;
+ struct clk *clks[];
+};
+
+struct rockchip_pmu {
+ struct device *dev;
+ struct regmap *regmap;
+ const struct rockchip_pmu_info *info;
+ struct mutex mutex; /* mutex lock for pmu */
+ struct genpd_onecell_data genpd_data;
+ struct generic_pm_domain *domains[];
+};
+
+#define to_rockchip_pd(gpd) container_of(gpd, struct rockchip_pm_domain, genpd)
+
+#define DOMAIN(pwr, status, req, idle, ack) \
+{ \
+ .pwr_mask = BIT(pwr), \
+ .status_mask = BIT(status), \
+ .req_mask = BIT(req), \
+ .idle_mask = BIT(idle), \
+ .ack_mask = BIT(ack), \
+}
+
+#define DOMAIN_RK3288(pwr, status, req) \
+ DOMAIN(pwr, status, req, req, (req) + 16)
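+
As an example of the encoding, the GPU entry in the rk3288 table at the bottom of this file, DOMAIN_RK3288(9, 9, 2), expands to:

	.pwr_mask    = BIT(9),
	.status_mask = BIT(9),
	.req_mask    = BIT(2),
	.idle_mask   = BIT(2),
	.ack_mask    = BIT(18),	/* (req) + 16 */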
+
+static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
+{
+ struct rockchip_pmu *pmu = pd->pmu;
+ const struct rockchip_domain_info *pd_info = pd->info;
+ unsigned int val;
+
+ regmap_read(pmu->regmap, pmu->info->idle_offset, &val);
+ return (val & pd_info->idle_mask) == pd_info->idle_mask;
+}
+
+static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
+ bool idle)
+{
+ const struct rockchip_domain_info *pd_info = pd->info;
+ struct rockchip_pmu *pmu = pd->pmu;
+ unsigned int val;
+
+ regmap_update_bits(pmu->regmap, pmu->info->req_offset,
+ pd_info->req_mask, idle ? -1U : 0);
+
+ dsb(sy);
+
+ do {
+ regmap_read(pmu->regmap, pmu->info->ack_offset, &val);
+ } while ((val & pd_info->ack_mask) != (idle ? pd_info->ack_mask : 0));
+
+ while (rockchip_pmu_domain_is_idle(pd) != idle)
+ cpu_relax();
+
+ return 0;
+}
+
+static bool rockchip_pmu_domain_is_on(struct rockchip_pm_domain *pd)
+{
+ struct rockchip_pmu *pmu = pd->pmu;
+ unsigned int val;
+
+ regmap_read(pmu->regmap, pmu->info->status_offset, &val);
+
+ /* 1'b0: power on, 1'b1: power off */
+ return !(val & pd->info->status_mask);
+}
+
+static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
+ bool on)
+{
+ struct rockchip_pmu *pmu = pd->pmu;
+
+ regmap_update_bits(pmu->regmap, pmu->info->pwr_offset,
+ pd->info->pwr_mask, on ? 0 : -1U);
+
+ dsb(sy);
+
+ while (rockchip_pmu_domain_is_on(pd) != on)
+ cpu_relax();
+}
+
+static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
+{
+ int i;
+
+ mutex_lock(&pd->pmu->mutex);
+
+ if (rockchip_pmu_domain_is_on(pd) != power_on) {
+ for (i = 0; i < pd->num_clks; i++)
+ clk_enable(pd->clks[i]);
+
+ if (!power_on) {
+ /* FIXME: add code to save AXI_QOS */
+
+ /* if powering down, idle request to NIU first */
+ rockchip_pmu_set_idle_request(pd, true);
+ }
+
+ rockchip_do_pmu_set_power_domain(pd, power_on);
+
+ if (power_on) {
+ /* if powering up, leave idle mode */
+ rockchip_pmu_set_idle_request(pd, false);
+
+ /* FIXME: add code to restore AXI_QOS */
+ }
+
+ for (i = pd->num_clks - 1; i >= 0; i--)
+ clk_disable(pd->clks[i]);
+ }
+
+ mutex_unlock(&pd->pmu->mutex);
+ return 0;
+}
+
+static int rockchip_pd_power_on(struct generic_pm_domain *domain)
+{
+ struct rockchip_pm_domain *pd = to_rockchip_pd(domain);
+
+ return rockchip_pd_power(pd, true);
+}
+
+static int rockchip_pd_power_off(struct generic_pm_domain *domain)
+{
+ struct rockchip_pm_domain *pd = to_rockchip_pd(domain);
+
+ return rockchip_pd_power(pd, false);
+}
+
+static int rockchip_pd_attach_dev(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ struct clk *clk;
+ int i;
+ int error;
+
+ dev_dbg(dev, "attaching to power domain '%s'\n", genpd->name);
+
+ error = pm_clk_create(dev);
+ if (error) {
+ dev_err(dev, "pm_clk_create failed %d\n", error);
+ return error;
+ }
+
+ i = 0;
+ while ((clk = of_clk_get(dev->of_node, i++)) && !IS_ERR(clk)) {
+ dev_dbg(dev, "adding clock '%pC' to list of PM clocks\n", clk);
+ error = pm_clk_add_clk(dev, clk);
+ if (error) {
+ dev_err(dev, "pm_clk_add_clk failed %d\n", error);
+ clk_put(clk);
+ pm_clk_destroy(dev);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static void rockchip_pd_detach_dev(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ dev_dbg(dev, "detaching from power domain '%s'\n", genpd->name);
+
+ pm_clk_destroy(dev);
+}
+
+static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
+ struct device_node *node)
+{
+ const struct rockchip_domain_info *pd_info;
+ struct rockchip_pm_domain *pd;
+ struct clk *clk;
+ int clk_cnt;
+ int i;
+ u32 id;
+ int error;
+
+ error = of_property_read_u32(node, "reg", &id);
+ if (error) {
+ dev_err(pmu->dev,
+ "%s: failed to retrieve domain id (reg): %d\n",
+ node->name, error);
+ return -EINVAL;
+ }
+
+ if (id >= pmu->info->num_domains) {
+ dev_err(pmu->dev, "%s: invalid domain id %d\n",
+ node->name, id);
+ return -EINVAL;
+ }
+
+ pd_info = &pmu->info->domain_info[id];
+ if (!pd_info) {
+ dev_err(pmu->dev, "%s: undefined domain id %d\n",
+ node->name, id);
+ return -EINVAL;
+ }
+
+ clk_cnt = of_count_phandle_with_args(node, "clocks", "#clock-cells");
+ pd = devm_kzalloc(pmu->dev,
+ sizeof(*pd) + clk_cnt * sizeof(pd->clks[0]),
+ GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ pd->info = pd_info;
+ pd->pmu = pmu;
+
+ for (i = 0; i < clk_cnt; i++) {
+ clk = of_clk_get(node, i);
+ if (IS_ERR(clk)) {
+ error = PTR_ERR(clk);
+ dev_err(pmu->dev,
+ "%s: failed to get clk at index %d: %d\n",
+ node->name, i, error);
+ goto err_out;
+ }
+
+ error = clk_prepare(clk);
+ if (error) {
+ dev_err(pmu->dev,
+ "%s: failed to prepare clk %pC (index %d): %d\n",
+ node->name, clk, i, error);
+ clk_put(clk);
+ goto err_out;
+ }
+
+ pd->clks[pd->num_clks++] = clk;
+
+ dev_dbg(pmu->dev, "added clock '%pC' to domain '%s'\n",
+ clk, node->name);
+ }
+
+ error = rockchip_pd_power(pd, true);
+ if (error) {
+ dev_err(pmu->dev,
+ "failed to power on domain '%s': %d\n",
+ node->name, error);
+ goto err_out;
+ }
+
+ pd->genpd.name = node->name;
+ pd->genpd.power_off = rockchip_pd_power_off;
+ pd->genpd.power_on = rockchip_pd_power_on;
+ pd->genpd.attach_dev = rockchip_pd_attach_dev;
+ pd->genpd.detach_dev = rockchip_pd_detach_dev;
+ pd->genpd.flags = GENPD_FLAG_PM_CLK;
+ pm_genpd_init(&pd->genpd, NULL, false);
+
+ pmu->genpd_data.domains[id] = &pd->genpd;
+ return 0;
+
+err_out:
+ while (--i >= 0) {
+ clk_unprepare(pd->clks[i]);
+ clk_put(pd->clks[i]);
+ }
+ return error;
+}
+
+static void rockchip_pm_remove_one_domain(struct rockchip_pm_domain *pd)
+{
+ int i;
+
+ for (i = 0; i < pd->num_clks; i++) {
+ clk_unprepare(pd->clks[i]);
+ clk_put(pd->clks[i]);
+ }
+
+ /* protect the zeroing of pm->num_clks */
+ mutex_lock(&pd->pmu->mutex);
+ pd->num_clks = 0;
+ mutex_unlock(&pd->pmu->mutex);
+
+ /* devm will free our memory */
+}
+
+static void rockchip_pm_domain_cleanup(struct rockchip_pmu *pmu)
+{
+ struct generic_pm_domain *genpd;
+ struct rockchip_pm_domain *pd;
+ int i;
+
+ for (i = 0; i < pmu->genpd_data.num_domains; i++) {
+ genpd = pmu->genpd_data.domains[i];
+ if (genpd) {
+ pd = to_rockchip_pd(genpd);
+ rockchip_pm_remove_one_domain(pd);
+ }
+ }
+
+ /* devm will free our memory */
+}
+
+static void rockchip_configure_pd_cnt(struct rockchip_pmu *pmu,
+ u32 domain_reg_offset,
+ unsigned int count)
+{
+ /* First configure domain power down transition count ... */
+ regmap_write(pmu->regmap, domain_reg_offset, count);
+ /* ... and then power up count. */
+ regmap_write(pmu->regmap, domain_reg_offset + 4, count);
+}
+
+static int rockchip_pm_domain_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *node;
+ struct device *parent;
+ struct rockchip_pmu *pmu;
+ const struct of_device_id *match;
+ const struct rockchip_pmu_info *pmu_info;
+ int error;
+
+ if (!np) {
+ dev_err(dev, "device tree node not found\n");
+ return -ENODEV;
+ }
+
+ match = of_match_device(dev->driver->of_match_table, dev);
+ if (!match || !match->data) {
+ dev_err(dev, "missing pmu data\n");
+ return -EINVAL;
+ }
+
+ pmu_info = match->data;
+
+ pmu = devm_kzalloc(dev,
+ sizeof(*pmu) +
+ pmu_info->num_domains * sizeof(pmu->domains[0]),
+ GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+
+ pmu->dev = &pdev->dev;
+ mutex_init(&pmu->mutex);
+
+ pmu->info = pmu_info;
+
+ pmu->genpd_data.domains = pmu->domains;
+ pmu->genpd_data.num_domains = pmu_info->num_domains;
+
+ parent = dev->parent;
+ if (!parent) {
+ dev_err(dev, "no parent for syscon devices\n");
+ return -ENODEV;
+ }
+
+ pmu->regmap = syscon_node_to_regmap(parent->of_node);
+
+ /*
+ * Configure power up and down transition delays for CORE
+ * and GPU domains.
+ */
+ rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset,
+ pmu_info->core_power_transition_time);
+ rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset,
+ pmu_info->gpu_power_transition_time);
+
+ error = -ENODEV;
+
+ for_each_available_child_of_node(np, node) {
+ error = rockchip_pm_add_one_domain(pmu, node);
+ if (error) {
+ dev_err(dev, "failed to handle node %s: %d\n",
+ node->name, error);
+ goto err_out;
+ }
+ }
+
+ if (error) {
+ dev_dbg(dev, "no power domains defined\n");
+ goto err_out;
+ }
+
+ of_genpd_add_provider_onecell(np, &pmu->genpd_data);
+
+ return 0;
+
+err_out:
+ rockchip_pm_domain_cleanup(pmu);
+ return error;
+}
+
+static const struct rockchip_domain_info rk3288_pm_domains[] = {
+ [RK3288_PD_VIO] = DOMAIN_RK3288(7, 7, 4),
+ [RK3288_PD_HEVC] = DOMAIN_RK3288(14, 10, 9),
+ [RK3288_PD_VIDEO] = DOMAIN_RK3288(8, 8, 3),
+ [RK3288_PD_GPU] = DOMAIN_RK3288(9, 9, 2),
+};
+
+static const struct rockchip_pmu_info rk3288_pmu = {
+ .pwr_offset = 0x08,
+ .status_offset = 0x0c,
+ .req_offset = 0x10,
+ .idle_offset = 0x14,
+ .ack_offset = 0x14,
+
+ .core_pwrcnt_offset = 0x34,
+ .gpu_pwrcnt_offset = 0x3c,
+
+ .core_power_transition_time = 24, /* 1us */
+ .gpu_power_transition_time = 24, /* 1us */
+
+ .num_domains = ARRAY_SIZE(rk3288_pm_domains),
+ .domain_info = rk3288_pm_domains,
+};
+
+static const struct of_device_id rockchip_pm_domain_dt_match[] = {
+ {
+ .compatible = "rockchip,rk3288-power-controller",
+ .data = (void *)&rk3288_pmu,
+ },
+ { /* sentinel */ },
+};
+
+static struct platform_driver rockchip_pm_domain_driver = {
+ .probe = rockchip_pm_domain_probe,
+ .driver = {
+ .name = "rockchip-pm-domain",
+ .of_match_table = rockchip_pm_domain_dt_match,
+ /*
+ * We can't forcibly eject devices from the power domain,
+ * so we can't really remove power domains once they
+ * were added.
+ */
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init rockchip_pm_domain_drv_register(void)
+{
+ return platform_driver_register(&rockchip_pm_domain_driver);
+}
+postcore_initcall(rockchip_pm_domain_drv_register);