path: root/drivers/net/ethernet
author    Pan Jiafei <Jiafei.Pan@freescale.com>    2014-07-30 07:41:01 (GMT)
committer Matthew Weigel <Matthew.Weigel@freescale.com>    2014-12-11 18:39:17 (GMT)
commit    d3515fe84958bd0b20ddddc600de5ea198e597d3 (patch)
tree      d9044cf7c33ff4b70d6a626bfa668dd0bdebb495 /drivers/net/ethernet
parent    2ae30dce496912131c5a5a401cf2ea13e8210b11 (diff)
download  linux-fsl-qoriq-d3515fe84958bd0b20ddddc600de5ea198e597d3.tar.xz
capwap: add capwap support
CAPWAP stands for Control and Provisioning of Wireless Access Points. CAPWAP
is a standard, interoperable protocol that enables a controller to manage a
collection of wireless access points.

There are three drivers in kernel space: the CAPWAP Domain driver, the CAPWAP
Bridge driver and the CAPWAP Tunnel driver.

The CAPWAP Domain driver implements the configuration of a CAPWAP domain,
including PCD configuration and FQ setup. The user must configure and
initialize the CAPWAP domain before the CAPWAP Tunnel and CAPWAP Bridge
drivers can work.

The CAPWAP Tunnel driver is a misc device driver which registers four misc
devices for the four CAPWAP tunnels: fsl-capwap-ctrl-dtls,
fsl-capwap-ctl-n-dtls, fsl-capwap-data-dtls and fsl-capwap-data-n-dtls. The
misc devices provide file operations to transmit and receive DTLS and
non-DTLS packets. For example, reading from fsl-capwap-ctrl-dtls receives a
DTLS control packet, and writing to fsl-capwap-ctrl-dtls transmits a DTLS
control packet.

The CAPWAP Bridge driver is also a misc device driver; it bridges packets
between a PCIe NIC and the CAPWAP data tunnels.

To support the CAPWAP drivers, a shared Ethernet port must be defined in the
device tree to be used as the CAPWAP Ethernet port.

Signed-off-by: Pan Jiafei <Jiafei.Pan@freescale.com>
Change-Id: I7c99191156ebee5dd43673d20fb5c8469a780546
Reviewed-on: http://git.am.freescale.net:8181/21438
Tested-by: Review Code-CDREVIEW <CDREVIEW@freescale.com>
Reviewed-by: Jianhua Xie <jianhua.xie@freescale.com>
Reviewed-by: Shengzhou Liu <Shengzhou.Liu@freescale.com>
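To illustrate the tunnel misc-device interface described above, here is a minimal user-space sketch that transmits and receives DTLS control packets. The /dev/fsl-capwap-ctrl-dtls path and the buffer sizes are assumptions derived from the misc device name registered by the tunnel driver; they are not defined by this patch.

/* Illustrative sketch only: exercise the DTLS control tunnel device.
 * The device path and payload sizes are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[2048];
	ssize_t n;
	int fd = open("/dev/fsl-capwap-ctrl-dtls", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Writing to the device transmits one DTLS control packet
	 * (a real application would fill buf with a CAPWAP control message).
	 */
	n = write(fd, buf, 64);
	if (n < 0)
		perror("write");

	/* Reading from the device receives one DTLS control packet. */
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		printf("received %zd bytes\n", n);

	close(fd);
	return 0;
}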
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/freescale/dpa/Kconfig                          |   23
-rw-r--r--  drivers/net/ethernet/freescale/dpa/Makefile                         |    1
-rw-r--r--  drivers/net/ethernet/freescale/dpa/capwap/Makefile                  |   51
-rw-r--r--  drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap.h             |   62
-rw-r--r--  drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_bridge.c      |  712
-rw-r--r--  drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_desc.c        |  288
-rw-r--r--  drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_desc.h        |  190
-rw-r--r--  drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_domain.c      | 1304
-rw-r--r--  drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_domain.h      |  179
-rw-r--r--  drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_domain_ext.h  |  228
-rw-r--r--  drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_fq.c          |  626
-rw-r--r--  drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_fq.h          |   44
-rw-r--r--  drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_ioctl.c       |  588
-rw-r--r--  drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_ioctl.h       |   57
-rw-r--r--  drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_op.c          |   96
-rw-r--r--  drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_tunnel.c      |  493
-rw-r--r--  drivers/net/ethernet/freescale/dpa/capwap/fsl_capwap_br.h           |   53
17 files changed, 4995 insertions, 0 deletions
diff --git a/drivers/net/ethernet/freescale/dpa/Kconfig b/drivers/net/ethernet/freescale/dpa/Kconfig
index 0f27792..a459141 100644
--- a/drivers/net/ethernet/freescale/dpa/Kconfig
+++ b/drivers/net/ethernet/freescale/dpa/Kconfig
@@ -26,6 +26,29 @@ config FSL_DPAA_OFFLINE_PORTS
Choosing this feature will not impact the functionality and/or performance of the system,
so it is safe to have it.
+config FSL_CAPWAP
+ bool "DPAA CAPWAP support"
+ depends on FSL_DPAA_ETH
+ default n
+ ---help---
+ CAPWAP Domain implements an offload mode for CAPWAP encapsulation and decapsulation.
+ There are three drivers: the CAPWAP domain driver implements domain configuration and
+ initialization; the CAPWAP bridge driver establishes a bridge between a PCIe NIC and
+ the CAPWAP tunnels; the CAPWAP tunnel driver provides a unified API to access the CAPWAP tunnels.
+if FSL_CAPWAP
+
+config FSL_CAPWAP_BRIDGE_ZMC
+ bool "CAPWAP Bridge use Zero-MM-Copy to get a better performance"
+ depends on USE_HW_SKB
+ default y
+ ---help---
+ If Linux has integrated the patch named "net: use hardware buffer pool to allocate skb",
+ the NIC will use BMan buffers to allocate skbs, so there is no memory copy
+ during the whole process.
+ Select this option to get better performance for the CAPWAP Bridge.
+
+endif # FSL_CAPWAP
+
config FSL_DPAA_ETH_JUMBO_FRAME
bool "Optimize for jumbo frames"
default n
diff --git a/drivers/net/ethernet/freescale/dpa/Makefile b/drivers/net/ethernet/freescale/dpa/Makefile
index 3686691..e3389fc 100644
--- a/drivers/net/ethernet/freescale/dpa/Makefile
+++ b/drivers/net/ethernet/freescale/dpa/Makefile
@@ -27,6 +27,7 @@ fsl-dpa-generic-objs := dpaa_eth_generic.o dpaa_eth_generic_sysfs.o \
dpaa_generic_ethtool.o
fsl-mac-objs := mac.o mac-api.o
fsl-oh-objs := offline_port.o
+obj-$(CONFIG_FSL_CAPWAP) += capwap/
# Needed by the tracing framework
CFLAGS_dpaa_eth.o := -I$(src)
diff --git a/drivers/net/ethernet/freescale/dpa/capwap/Makefile b/drivers/net/ethernet/freescale/dpa/capwap/Makefile
new file mode 100644
index 0000000..b53fdfb
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/capwap/Makefile
@@ -0,0 +1,51 @@
+################################################################################
+#Copyright 2014 Freescale Semiconductor, Inc.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# Neither the name of Freescale Semiconductor nor the
+# names of its contributors may be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# ALTERNATIVELY, this software may be distributed under the terms of the
+# GNU General Public License ("GPL") as published by the Free Software
+# Foundation, either version 2 of that License or (at your option) any
+# later version.
+#
+# THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+################################################################################
+
+#
+# Makefile for the DPAA CAPWAP driver
+#
+
+ccflags-y += -DVERSION=\"\"
+
+#Include NetComm SW specific definitions
+include $(srctree)/drivers/net/ethernet/freescale/fman/ncsw_config.mk
+
+EXTRA_CFLAGS += \
+ -Idrivers/crypto/caam \
+ -Idrivers/net/ethernet/freescale/fman/src/wrapper \
+ -Idrivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd \
+ -Idrivers/net/ethernet/freescale/fman/Peripherals/FM/inc
+
+obj-$(CONFIG_FSL_CAPWAP) += fsl-dpa-capwap.o
+
+fsl-dpa-capwap-objs := dpaa_capwap_domain.o dpaa_capwap_ioctl.o \
+ dpaa_capwap_desc.o dpaa_capwap_fq.o dpaa_capwap_op.o \
+ dpaa_capwap_tunnel.o dpaa_capwap_bridge.o
diff --git a/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap.h b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap.h
new file mode 100644
index 0000000..032ba13
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap.h
@@ -0,0 +1,62 @@
+ /* Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_CAPWAP_H__
+#define __DPAA_CAPWAP_H__
+
+enum qman_cb_dqrr_result __hot
+capwap_control_dtls_rx_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq);
+
+
+enum qman_cb_dqrr_result __hot
+capwap_control_n_dtls_rx_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq);
+
+enum qman_cb_dqrr_result __hot
+capwap_data_dtls_rx_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq);
+
+enum qman_cb_dqrr_result __hot
+capwap_data_n_dtls_rx_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq);
+
+void dpa_fd_release_sg(const struct net_device *net_dev,
+ const struct qm_fd *fd);
+
+int upload_data_packets(u32 fqid, const struct qm_fd *fd,
+ struct net_device *net_dev);
+
+#endif /* __DPAA_CAPWAP_H__ */
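The four DQRR callbacks declared in this header are QMan dequeue handlers for the CAPWAP Rx frame queues. Below is a minimal sketch of how one of them might be bound to a frame queue through the Freescale QMan API; the helper is hypothetical (the real FQ wiring is done elsewhere in this patch, e.g. dpaa_capwap_fq.c), and struct dpa_fq is assumed to come from dpaa_eth.h.

/* Hypothetical sketch: bind a CAPWAP data-tunnel Rx FQ to its DQRR
 * handler. Not taken from this patch.
 */
#include <linux/fsl_qman.h>
#include "dpaa_eth.h"       /* assumed location of struct dpa_fq */
#include "dpaa_capwap.h"

static int capwap_example_init_rx_fq(struct dpa_fq *dpa_fq, u32 fqid)
{
	static const struct qman_fq_cb data_dtls_cb = {
		.dqrr = capwap_data_dtls_rx_dqrr,
	};

	/* Install the dequeue callback before creating the FQ object. */
	dpa_fq->fq_base.cb = data_dtls_cb;

	/* Software only dequeues from this FQ; the SEC/OP port enqueues. */
	return qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE,
			      &dpa_fq->fq_base);
}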
diff --git a/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_bridge.c b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_bridge.c
new file mode 100644
index 0000000..986a717
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_bridge.c
@@ -0,0 +1,712 @@
+ /* Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/if_vlan.h>
+#include <linux/fs.h>
+#include <linux/fsl_bman.h>
+
+#include "fsl_capwap_br.h"
+#include "dpaa_eth_common.h"
+#include "dpaa_capwap.h"
+#include "dpaa_capwap_domain.h"
+
+#ifdef CAPWAP_HEADER_MANIP
+static const char capwap_hdr[] = {
+ 0x00, 0x28, 0x43, 0x10, 0x00, 0x01, 0x00, 0x00, 0x08, 0xfc,
+ 0x6e, 0x64, 0xe5, 0xd1, 0x56, 0xee, 0xc1, 0x00, 0x00, 0x00
+};
+#define CAPWAP_HEADER_LENGTH 20
+#endif
+
+#define MANIP_EXTRA_SPACE 64
+
+#define ETHERNET_HEADER_LENGTH 14
+
+struct fslbr_if_stats {
+ uint32_t if_rx;
+ uint32_t if_tx;
+ uint32_t br_tx[2];
+ uint32_t br_tx_err[2];
+ uint32_t br_no_buffer_err[2];
+};
+
+struct fslbr_if {
+ struct net_device *dev;
+ struct list_head list;
+ int ifindex;
+ struct net_device *capwap_net_dev;
+ struct qman_fq *br_to_dpaa_fq;
+ struct fslbr_if_stats if_stats;
+};
+
+struct tunnel_stats {
+ uint32_t tunnel_rx;
+ uint32_t tunnel_rx_err;
+ uint32_t tunnel_rx_drop;
+ uint32_t tunnel_upload;
+ uint32_t tunnel_upload_err;
+};
+
+static uint32_t stat_buffer_alloc;
+static uint32_t stat_buffer_free;
+
+#define DATA_DTLS_TUNNEL 0
+#define DATA_N_DTLS_TUNNEL 1
+static struct tunnel_stats fsl_tunnel_stats[2];
+
+static LIST_HEAD(fslbr_iflist);
+static int fslbr_if_count;
+static struct dpaa_capwap_domain *capwap_domain;
+static struct dpa_bp *br_dpa_bp;
+static int encrypt_status; /* 0: non-dtls encrypt, 1: dtls encrypt */
+
+static struct sk_buff *alloc_bman_skb(void *bp, unsigned int length);
+static void free_bman_skb(struct sk_buff *skb);
+
+static inline struct fslbr_if *distribute_to_eth(const struct ethhdr *eth)
+{
+ struct fslbr_if *fslbr_dev;
+
+ list_for_each_entry(fslbr_dev, &fslbr_iflist, list) {
+ if (ether_addr_equal(fslbr_dev->dev->dev_addr, eth->h_source))
+ return fslbr_dev;
+ }
+ return NULL;
+}
+
+static enum qman_cb_dqrr_result __hot
+capwap_dpaa_to_br(const struct qm_fd *fd, struct qman_fq *fq,
+ struct net_device *net_dev, int tunnel_id)
+{
+ struct dpa_priv_s *priv;
+ struct dpa_bp *dpa_bp;
+ struct sk_buff *skb;
+ struct qm_sg_entry *sgt;
+ int i, ret;
+ struct net_device *to_dev = NULL;
+ void *new_buf;
+ ssize_t fd_off = dpa_fd_offset(fd);
+ struct ethhdr *eth = NULL;
+ struct fslbr_if *fslbr_dev;
+ dma_addr_t addr;
+
+ priv = netdev_priv(net_dev);
+
+ dpa_bp = dpa_bpid2pool(fd->bpid);
+ BUG_ON(!dpa_bp);
+
+ if (unlikely(fd->status & FM_FD_STAT_RX_ERRORS) != 0) {
+ if (netif_msg_hw(priv) && net_ratelimit())
+ netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_RX_ERRORS);
+
+ fsl_tunnel_stats[tunnel_id].tunnel_rx_err++;
+
+ goto out;
+ }
+
+ fsl_tunnel_stats[tunnel_id].tunnel_rx++;
+
+ if (fd->format == qm_fd_contig) {
+ addr = qm_fd_addr(fd);
+ new_buf = phys_to_virt(addr);
+ eth = new_buf + fd_off;
+ } else if (fd->format == qm_fd_sg) {
+ addr = qm_fd_addr(fd);
+ sgt = phys_to_virt(addr) + dpa_fd_offset(fd);
+ addr = qm_sg_addr(&sgt[0]) + sgt[0].offset;
+ eth = phys_to_virt(addr);
+ }
+
+ if (eth) {
+ fslbr_dev = distribute_to_eth(eth);
+ if (fslbr_dev)
+ to_dev = fslbr_dev->dev;
+ }
+
+ if (to_dev == NULL) {
+ ret = upload_data_packets(fq->fqid, fd, net_dev);
+ if (ret) {
+ fsl_tunnel_stats[tunnel_id].tunnel_upload_err++;
+ goto out;
+ }
+ fsl_tunnel_stats[tunnel_id].tunnel_upload++;
+ return qman_cb_dqrr_consume;
+ }
+
+#ifdef CONFIG_FSL_CAPWAP_BRIDGE_ZMC
+ /* Just use zero-copy for contig frames */
+ if (fd->format == qm_fd_contig) {
+ addr = qm_fd_addr(fd);
+ new_buf = phys_to_virt(addr);
+ skb = build_skb(new_buf, DPA_SKB_SIZE(dpa_bp->size));
+ if (skb) { /* zero copy */
+ skb_reserve(skb, fd_off);
+ skb_put(skb, dpa_fd_length(fd));
+ skb->hw_skb_state |= IS_HW_SKB;
+ skb->hw_skb_state |= HW_SKB_SW_FREE;
+ skb->hw_skb_priv = dpa_bp;
+ skb->free_hw_skb = free_bman_skb;
+ goto skb_copied;
+ }
+ }
+#endif
+
+ skb = netdev_alloc_skb(net_dev,
+ priv->tx_headroom + dpa_fd_length(fd));
+ if (unlikely(skb == NULL)) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ netdev_err(net_dev, "Could not alloc skb\n");
+
+ fsl_tunnel_stats[tunnel_id].tunnel_rx_err++;
+ goto out;
+ }
+
+ skb_reserve(skb, priv->tx_headroom);
+
+ if (fd->format == qm_fd_sg) {
+ addr = qm_fd_addr(fd);
+ sgt = phys_to_virt(addr) + dpa_fd_offset(fd);
+
+ for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
+ BUG_ON(sgt[i].extension);
+
+ /* copy from sgt[i] */
+ memcpy(skb_put(skb, sgt[i].length),
+ phys_to_virt(qm_sg_addr(&sgt[i]) +
+ sgt[i].offset),
+ sgt[i].length);
+ if (sgt[i].final)
+ break;
+ }
+ goto skb_copied;
+ }
+
+ /* otherwise fd->format == qm_fd_contig */
+ /* Fill the SKB */
+ memcpy(skb_put(skb, dpa_fd_length(fd)),
+ phys_to_virt(qm_fd_addr(fd)) +
+ dpa_fd_offset(fd), dpa_fd_length(fd));
+
+skb_copied:
+#ifdef CAPWAP_HEADER_MANIP
+ /* Remove CAPWAP header */
+ skb_pull(skb, CAPWAP_HEADER_LENGTH);
+#endif
+ skb_reset_mac_header(skb);
+
+ if (to_dev) {
+ /* Dropped when frames are larger than MTU */
+ if (skb->len > (to_dev->mtu + ETHERNET_HEADER_LENGTH)) {
+ dev_kfree_skb_any(skb);
+ fsl_tunnel_stats[tunnel_id].tunnel_rx_drop++;
+ goto out;
+ }
+ skb->dev = to_dev;
+#ifdef CONFIG_FSL_CAPWAP_BRIDGE_ZMC
+ if (skb->hw_skb_state & IS_HW_SKB) {
+ dev_queue_xmit(skb);
+ fslbr_dev->if_stats.if_tx++;
+ return qman_cb_dqrr_consume;
+ }
+#endif
+ dev_queue_xmit(skb);
+ fslbr_dev->if_stats.if_tx++;
+ }
+
+out:
+ dpa_fd_release(net_dev, fd);
+
+ return qman_cb_dqrr_consume;
+}
+
+enum qman_cb_dqrr_result __hot
+capwap_data_dtls_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ const struct qm_fd *fd = &dq->fd;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ percpu_priv = __this_cpu_ptr(priv->percpu_priv);
+
+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
+ return qman_cb_dqrr_stop;
+
+ return capwap_dpaa_to_br(fd, fq, net_dev, DATA_DTLS_TUNNEL);
+}
+
+enum qman_cb_dqrr_result __hot
+capwap_data_n_dtls_rx_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ const struct qm_fd *fd = &dq->fd;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ percpu_priv = __this_cpu_ptr(priv->percpu_priv);
+
+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
+ return qman_cb_dqrr_stop;
+
+ return capwap_dpaa_to_br(fd, fq, net_dev, DATA_N_DTLS_TUNNEL);
+}
+
+static int __hot capwap_br_to_dpaa(struct sk_buff *skb,
+ struct fslbr_if *fslbr_dev)
+{
+ struct dpa_bp *dpa_bp;
+ struct bm_buffer bmb;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct dpa_priv_s *priv;
+ struct qm_fd fd;
+ int queue_mapping;
+ int err;
+ void *dpa_bp_vaddr;
+ int i;
+ struct qman_fq *fq_base, *fq;
+ dma_addr_t addr;
+ struct net_device *net_dev = fslbr_dev->capwap_net_dev;
+ int tunnel_id = encrypt_status ? DATA_DTLS_TUNNEL : DATA_N_DTLS_TUNNEL;
+
+#ifdef CAPWAP_HEADER_MANIP
+ skb_push(skb, skb->mac_len + CAPWAP_HEADER_LENGTH);
+ memcpy(skb->data, capwap_hdr, CAPWAP_HEADER_LENGTH);
+#else
+ skb_push(skb, skb->mac_len);
+#endif
+
+ priv = netdev_priv(net_dev);
+ percpu_priv = __this_cpu_ptr(priv->percpu_priv);
+
+ memset(&fd, 0, sizeof(fd));
+ fd.format = qm_fd_contig;
+
+ queue_mapping = smp_processor_id();
+
+ dpa_bp = priv->dpa_bp;
+
+#ifdef CONFIG_FSL_CAPWAP_BRIDGE_ZMC
+ if (skb->hw_skb_state & IS_HW_SKB) {
+ fd.bpid = dpa_bp->bpid;
+ fd.length20 = skb_headlen(skb);
+ addr = virt_to_phys(skb->head);
+ qm_fd_addr_set64(&fd, addr);
+ fd.offset = skb_headroom(skb);
+ goto skb_copied;
+ }
+#endif
+
+ err = bman_acquire(dpa_bp->pool, &bmb, 1, 0);
+ if (unlikely(err <= 0)) {
+ fslbr_dev->if_stats.br_no_buffer_err[tunnel_id]++;
+ if (err == 0)
+ err = -ENOMEM;
+ goto buf_acquire_failed;
+ }
+ fd.bpid = dpa_bp->bpid;
+
+ fd.length20 = skb_headlen(skb);
+ qm_fd_addr_set64(&fd, bm_buffer_get64(&bmb));
+ fd.offset = priv->tx_headroom + MANIP_EXTRA_SPACE;
+
+ dpa_bp_vaddr = phys_to_virt(bm_buf_addr(&bmb));
+
+ /* Copy the packet payload */
+ skb_copy_from_linear_data(skb,
+ dpa_bp_vaddr + dpa_fd_offset(&fd),
+ dpa_fd_length(&fd));
+
+skb_copied:
+ fq_base = (struct qman_fq *)capwap_domain->fqs->
+ outbound_core_tx_fqs.fq_base;
+ if (encrypt_status)
+ fq = &fq_base[1];
+ else
+ fq = &fq_base[3];
+
+ for (i = 0; i < 100000; i++) {
+ err = qman_enqueue(fq, &fd, 0);
+ if (err != -EBUSY)
+ break;
+ }
+ if (unlikely(err < 0)) {
+ /* TODO differentiate b/w -EBUSY (EQCR full) and other codes? */
+ fslbr_dev->if_stats.br_tx_err[tunnel_id]++;
+ pr_err("fslbr: fsl bridge transmit to dpaa error\n");
+ return err;
+ } else
+ fslbr_dev->if_stats.br_tx[tunnel_id]++;
+
+ return 0;
+
+buf_acquire_failed:
+ /* We're done with the skb */
+ return -ENOMEM;
+}
+
+rx_handler_result_t fslbr_handle_frame(struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ struct fslbr_if *fslbr_dev;
+ int ret;
+
+ if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
+ return RX_HANDLER_PASS;
+
+ if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
+ goto drop;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ return RX_HANDLER_CONSUMED;
+
+ fslbr_dev =
+ (struct fslbr_if *)rcu_dereference(skb->dev->rx_handler_data);
+ fslbr_dev->if_stats.if_rx++;
+
+ if (skb_is_nonlinear(skb)) {
+ pr_warn("CAPWAP Bridge does't support nonlinear skb");
+ goto drop;
+ }
+
+ ret = capwap_br_to_dpaa(skb, fslbr_dev);
+ if (ret)
+ goto drop;
+#ifdef CONFIG_FSL_CAPWAP_BRIDGE_ZMC
+ /* Set use_dpaa_bp_state to free skb without free data memory region*/
+ if (skb->hw_skb_state & IS_HW_SKB)
+ skb->hw_skb_state &= ~HW_SKB_SW_FREE;
+#endif
+drop:
+ kfree_skb(skb);
+
+ return RX_HANDLER_CONSUMED;
+}
+
+static long fslbr_add_del_if(void __user *arg, int isadd)
+{
+ struct net *net = &init_net;
+ struct net_device *dev;
+ struct fslbr_if *fslbr_dev;
+ int ret;
+ int ifindex;
+
+ ret = copy_from_user(&ifindex, arg, sizeof(ifindex));
+ if (ret)
+ return -EFAULT;
+
+ dev = __dev_get_by_index(net, ifindex);
+ if (dev == NULL)
+ return -EINVAL;
+
+ if (isadd) {
+ if (fslbr_if_count >= MAX_IF_COUNT)
+ return -EINVAL;
+
+ list_for_each_entry(fslbr_dev, &fslbr_iflist, list)
+ if (fslbr_dev->ifindex == ifindex)
+ return -EBUSY;
+
+ fslbr_dev = kzalloc(sizeof(*fslbr_dev), GFP_KERNEL);
+ if (!fslbr_dev) {
+ pr_err("Failed to add fslbr if\n");
+ return -ENOMEM;
+ }
+
+ fslbr_dev->dev = dev;
+ fslbr_dev->ifindex = ifindex;
+ fslbr_dev->capwap_net_dev = capwap_domain->net_dev;
+ rtnl_lock();
+ ret = netdev_rx_handler_register(dev,
+ fslbr_handle_frame, fslbr_dev);
+ rtnl_unlock();
+ if (ret) {
+ kfree(fslbr_dev);
+ return ret;
+ }
+#ifdef CONFIG_FSL_CAPWAP_BRIDGE_ZMC
+ dev->hw_skb_priv = br_dpa_bp;
+ dev->alloc_hw_skb = alloc_bman_skb;
+ dev->free_hw_skb = free_bman_skb;
+#endif
+ list_add_tail(&fslbr_dev->list, &fslbr_iflist);
+ fslbr_if_count++;
+
+ return 0;
+ } else {
+ list_for_each_entry(fslbr_dev, &fslbr_iflist, list) {
+ if (fslbr_dev->dev == dev) {
+ list_del(&fslbr_dev->list);
+ kfree(fslbr_dev);
+ fslbr_if_count--;
+ rtnl_lock();
+ netdev_rx_handler_unregister(dev);
+ rtnl_unlock();
+ return 0;
+ }
+ }
+ return -EINVAL;
+ }
+}
+
+static long fslbr_list(void __user *arg)
+{
+ /* iflist defines the data to be copied to userspace.
+ * The first "int" data is encryption status,
+ * the second "int" data is the count of interfaces in bridge
+ * the following data are the index list of interfaces in bridge
+ */
+ int iflist[MAX_IF_COUNT + 2];
+ struct fslbr_if *fslbr_dev;
+ int i = 2;
+ long ret = 0;
+
+ iflist[0] = encrypt_status;
+ iflist[1] = fslbr_if_count;
+
+ list_for_each_entry(fslbr_dev, &fslbr_iflist, list) {
+ iflist[i++] = fslbr_dev->ifindex;
+ }
+ if (copy_to_user(arg, iflist, sizeof(iflist)))
+ ret = -EFAULT;
+ return ret;
+}
+
+static long fslbr_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ void __user *a = (void __user *)arg;
+ int status;
+ int ret;
+
+ switch (cmd) {
+ case FSLBR_IOCTL_IF_ADD:
+ return fslbr_add_del_if(a, 1);
+ case FSLBR_IOCTL_IF_DEL:
+ return fslbr_add_del_if(a, 0);
+ case FSLBR_IOCTL_IF_LIST:
+ return fslbr_list(a);
+ case FSLBR_IOCTL_SET_ENCRYPT:
+ ret = copy_from_user(&status, a, sizeof(int));
+ if (ret)
+ return -EFAULT;
+ if (!status || status == 1) {
+ encrypt_status = status;
+ return 0;
+ } else
+ return -EINVAL;
+ }
+ return -EINVAL;
+}
+
+#ifdef CONFIG_COMPAT
+static long fslbr_ioctl_compat(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ default:
+ return fslbr_ioctl(fp, cmd, arg);
+ }
+ return -EINVAL;
+}
+#endif
+
+static ssize_t fslbr_show_statistic(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t bytes = 0;
+ struct fslbr_if *fslbr_dev;
+ int i;
+ uint32_t br_tx[2] = {0, 0};
+ uint32_t br_tx_err[2] = {0, 0};
+ uint32_t br_no_buffer_err[2] = {0, 0};
+
+ list_for_each_entry(fslbr_dev, &fslbr_iflist, list) {
+ bytes += sprintf(buf + bytes, "Eth%d\tRx: %u\tTx: %u\n",
+ fslbr_dev->ifindex,
+ fslbr_dev->if_stats.if_rx,
+ fslbr_dev->if_stats.if_tx);
+ br_tx[0] += fslbr_dev->if_stats.br_tx[0];
+ br_tx[1] += fslbr_dev->if_stats.br_tx[1];
+ br_tx_err[0] += fslbr_dev->if_stats.br_tx_err[0];
+ br_tx_err[1] += fslbr_dev->if_stats.br_tx_err[1];
+ br_no_buffer_err[0] += fslbr_dev->if_stats.br_no_buffer_err[0];
+ br_no_buffer_err[1] += fslbr_dev->if_stats.br_no_buffer_err[1];
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (i == DATA_DTLS_TUNNEL)
+ bytes += sprintf(buf + bytes,
+ "CAPWAP-DATA-DTLS-Tunnel:\n");
+ else
+ bytes += sprintf(buf + bytes,
+ "CAPWAP-N-DATA-DTLS-Tunnel:\n");
+ bytes += sprintf(buf + bytes, "\tRx: %u\n",
+ fsl_tunnel_stats[i].tunnel_rx);
+ bytes += sprintf(buf + bytes, "\tRx Error: %u\n",
+ fsl_tunnel_stats[i].tunnel_rx_err);
+ bytes += sprintf(buf + bytes, "\tRx Drop: %u\n",
+ fsl_tunnel_stats[i].tunnel_rx_drop);
+ bytes += sprintf(buf + bytes, "\tTx: %u\n", br_tx[i]);
+ bytes += sprintf(buf + bytes, "\tTx Error: %u\n", br_tx_err[i]);
+ bytes += sprintf(buf + bytes,
+ "\tTx N-ZZM No Buffer Error: %u-%u\n",
+ br_no_buffer_err[i]);
+ bytes += sprintf(buf + bytes, "\tTx Upload: %u\n",
+ fsl_tunnel_stats[i].tunnel_upload);
+ bytes += sprintf(buf + bytes, "\tTx Upload Error: %u\n",
+ fsl_tunnel_stats[i].tunnel_upload_err);
+ }
+
+ bytes += sprintf(buf + bytes, "BMan Buffer alloced: %u\n",
+ stat_buffer_alloc);
+ bytes += sprintf(buf + bytes, "BMan Buffer freed: %u\n",
+ stat_buffer_free);
+ return bytes;
+}
+
+static DEVICE_ATTR(capwap_bridge, S_IRUGO, fslbr_show_statistic, NULL);
+
+static const struct file_operations fslbr_fops = {
+ .open = simple_open,
+ .unlocked_ioctl = fslbr_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = fslbr_ioctl_compat,
+#endif
+};
+
+static struct miscdevice fslbr_miscdev = {
+ .name = "fsl-br",
+ .fops = &fslbr_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+};
+
+int capwap_br_init(struct dpaa_capwap_domain *domain)
+{
+ int ret = 0;
+ struct dpa_priv_s *priv;
+ struct device *dev;
+
+ fslbr_if_count = 0;
+ encrypt_status = 1;
+ stat_buffer_alloc = 0;
+ stat_buffer_free = 0;
+ memset(fsl_tunnel_stats, 0, sizeof(fsl_tunnel_stats));
+ capwap_domain = domain;
+ priv = netdev_priv(domain->net_dev);
+ br_dpa_bp = priv->dpa_bp;
+
+ ret = misc_register(&fslbr_miscdev);
+ if (ret)
+ pr_err("fslbr: failed to register misc device\n");
+ dev = (&fslbr_miscdev)->this_device;
+ if (device_create_file(dev, &dev_attr_capwap_bridge))
+ dev_err(dev, "Error creating sysfs file\n");
+ return ret;
+}
+
+void capwap_br_exit(void)
+{
+ struct device *dev;
+
+ dev = (&fslbr_miscdev)->this_device;
+ device_remove_file(dev, &dev_attr_capwap_bridge);
+ misc_deregister(&fslbr_miscdev);
+}
+
+#ifdef CONFIG_FSL_CAPWAP_BRIDGE_ZMC
+static struct sk_buff *alloc_bman_skb(void *bp, unsigned int length)
+{
+ struct dpa_bp *dpa_bp = (struct dpa_bp *)bp;
+ void *new_buf;
+ int err;
+ struct bm_buffer bmb;
+ struct sk_buff *skb;
+
+ if (dpa_bp->size < length) {
+ pr_warn("fslbr:bp size smaller than length\n");
+ return NULL;
+ }
+
+ err = bman_acquire(dpa_bp->pool, &bmb, 1, 0);
+ if (unlikely(err <= 0))
+ return NULL;
+
+ new_buf = phys_to_virt(bm_buf_addr(&bmb));
+ skb = build_skb(new_buf, DPA_SKB_SIZE(dpa_bp->size));
+ if (unlikely(!skb)) {
+ while (unlikely(bman_release(dpa_bp->pool, &bmb, 1, 0)))
+ cpu_relax();
+ return NULL;
+ }
+
+ /* Set manip extra space for capwap tunnel */
+ if (skb) {
+ skb_reserve(skb, MANIP_EXTRA_SPACE);
+ skb->hw_skb_state |= HW_SKB_SW_FREE;
+ }
+ stat_buffer_alloc++;
+ return skb;
+
+}
+
+static void free_bman_skb(struct sk_buff *skb)
+{
+ struct dpa_bp *dpa_bp;
+ struct bm_buffer bmb;
+ dma_addr_t addr;
+
+ addr = virt_to_phys(skb->head);
+ bm_buffer_set64(&bmb, addr);
+ if (skb->dev->hw_skb_priv) {
+ dpa_bp = (struct dpa_bp *)skb->hw_skb_priv;
+ while (bman_release(dpa_bp->pool, &bmb, 1, 0))
+ cpu_relax();
+ } else {
+ if (br_dpa_bp) {
+ while (bman_release(br_dpa_bp->pool, &bmb, 1, 0))
+ cpu_relax();
+ }
+ }
+ stat_buffer_free++;
+}
+#endif
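The bridge exposes its control path through the fsl-br misc device and the FSLBR_IOCTL_* commands handled in fslbr_ioctl() above; FSLBR_IOCTL_IF_LIST returns an int array whose first element is the encryption status, second the interface count, followed by the ifindexes. The user-space sketch below shows one way an application might attach a NIC and read the list back. The ioctl macros and MAX_IF_COUNT come from fsl_capwap_br.h (not part of this excerpt); the interface name "eth2" and the fallback value of 8 are assumptions.

/* Illustrative user-space sketch of the fsl-br ioctl interface.
 * Assumes the FSLBR_IOCTL_* definitions are available to user space.
 */
#include <fcntl.h>
#include <net/if.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "fsl_capwap_br.h"

#ifndef MAX_IF_COUNT
#define MAX_IF_COUNT 8 /* assumed value for this sketch */
#endif

int main(void)
{
	int iflist[MAX_IF_COUNT + 2];
	int ifindex = if_nametoindex("eth2"); /* hypothetical PCIe NIC */
	int fd = open("/dev/fsl-br", O_RDWR);
	int i;

	if (fd < 0 || ifindex == 0)
		return 1;

	/* Attach the NIC to the CAPWAP bridge by its ifindex. */
	if (ioctl(fd, FSLBR_IOCTL_IF_ADD, &ifindex) < 0)
		perror("FSLBR_IOCTL_IF_ADD");

	/* Read back the encryption status and the attached interfaces. */
	if (ioctl(fd, FSLBR_IOCTL_IF_LIST, iflist) == 0) {
		printf("encrypt: %d, interfaces: %d\n", iflist[0], iflist[1]);
		for (i = 0; i < iflist[1]; i++)
			printf("  ifindex %d\n", iflist[2 + i]);
	}

	close(fd);
	return 0;
}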
diff --git a/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_desc.c b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_desc.c
new file mode 100644
index 0000000..89c048d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_desc.c
@@ -0,0 +1,288 @@
+/* Copyright 2014 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/version.h>
+#include <linux/platform_device.h>
+
+#include "compat.h"
+#include "desc.h"
+#include "error.h"
+#include "jr.h"
+#include "ctrl.h"
+
+#include "dpaa_capwap_desc.h"
+
+/* If SEC ERA is unknown default to this value */
+#define SEC_DEF_ERA 2 /* like in P4080 */
+
+/* to retrieve a 256 byte aligned buffer address from an address
+ * we need to copy only the first 7 bytes
+ */
+#define ALIGNED_PTR_ADDRESS_SZ (CAAM_PTR_SZ - 1)
+
+#define JOB_DESC_HDR_LEN CAAM_CMD_SZ
+#define SEQ_OUT_PTR_SGF_MASK 0x01000000
+/* relative offset where the input pointer should be updated in the descriptor*/
+#define IN_PTR_REL_OFF 4 /* words from current location */
+/* dummy pointer value */
+#define DUMMY_PTR_VAL 0x00000000
+#define PTR_LEN 2 /* Descriptor is created only for 8 byte
+ * pointer. PTR_LEN is in words.
+ */
+
+static const struct of_device_id sec_jr_match[] = {
+ {
+ .compatible = "fsl,sec-v4.0-job-ring"
+ }
+};
+
+static struct device *get_jrdev(void)
+{
+ struct device_node *sec_jr_node;
+ struct platform_device *sec_of_jr_dev;
+
+ sec_jr_node = of_find_matching_node(NULL, &sec_jr_match[0]);
+ if (!sec_jr_node) {
+ pr_err("Couln't find the device_node SEC job-ring, check the device tree\n");
+ return NULL;
+ }
+
+ sec_of_jr_dev = of_find_device_by_node(sec_jr_node);
+ if (!sec_of_jr_dev) {
+ pr_err("SEC job-ring of_device null\n");
+ return NULL;
+ }
+
+ return &sec_of_jr_dev->dev;
+}
+
+/* retrieve and store SEC information */
+int get_sec_info(struct dpaa_capwap_sec_info *secinfo)
+{
+ struct device_node *sec_node;
+ const u32 *sec_era;
+ int prop_size;
+
+ sec_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v5.3");
+ if (sec_node)
+ secinfo->sec_ver = SEC_VER_5_3;
+ else {
+ secinfo->sec_ver = SEC_DEF_VER;
+ sec_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!sec_node) {
+ pr_err("Can't find device node for SEC! Check device tree!\n");
+ return -ENODEV;
+ }
+ }
+
+ sec_era = of_get_property(sec_node, "fsl,sec-era", &prop_size);
+ if (sec_era && prop_size == sizeof(*sec_era) && *sec_era > 0)
+ secinfo->sec_era = *sec_era;
+ else
+ secinfo->sec_era = SEC_DEF_ERA;
+
+ secinfo->jrdev = get_jrdev();
+ if (!secinfo->jrdev)
+ return -ENODEV;
+
+ return 0;
+}
+
+void cnstr_shdsc_dtls_encap(uint32_t *desc,
+ uint16_t *bufsize,
+ struct cipher_params *cipherdata,
+ struct auth_params *authdata,
+ uint32_t data_move_size)
+{
+ uint32_t *key_jump;
+
+ init_sh_desc_pdb(desc, HDR_SAVECTX | HDR_SHARE_SERIAL,
+ sizeof(struct dtls_block_encap_pdb));
+ if (data_move_size) {
+ append_seq_fifo_load(desc, data_move_size, FIFOLD_CLASS_BOTH |
+ FIFOLD_TYPE_NOINFOFIFO);
+ append_seq_fifo_store(desc, FIFOST_TYPE_META_DATA |
+ FIFOST_AUX_TYPE0, data_move_size);
+ }
+ key_jump = append_jump(desc, JUMP_TYPE_LOCAL | CLASS_BOTH |
+ JUMP_TEST_ALL | JUMP_COND_SHRD);
+ /* Append split authentication key */
+ append_key_as_imm(desc, authdata->split_key,
+ authdata->split_key_pad_len,
+ authdata->split_key_len,
+ CLASS_2 | KEY_ENC | KEY_DEST_MDHA_SPLIT);
+ /* Append cipher key */
+ append_key_as_imm(desc, cipherdata->cipher_key,
+ cipherdata->cipher_key_len, cipherdata->cipher_key_len,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump);
+
+ /* Protocol specific operation */
+ append_operation(desc, OP_PCLID_DTLS | OP_TYPE_ENCAP_PROTOCOL |
+ cipherdata->cipher_type);
+
+ *bufsize = desc_len(desc);
+}
+
+void cnstr_shdsc_dtls_decap(uint32_t *desc,
+ uint16_t *bufsize,
+ struct cipher_params *cipherdata,
+ struct auth_params *authdata,
+ uint32_t data_move_size)
+{
+ uint32_t *key_jump;
+
+ init_sh_desc_pdb(desc, HDR_SAVECTX | HDR_SHARE_SERIAL,
+ sizeof(struct dtls_block_decap_pdb));
+ if (data_move_size) {
+ append_seq_fifo_load(desc, data_move_size, FIFOLD_CLASS_BOTH |
+ FIFOLD_TYPE_NOINFOFIFO);
+ append_seq_fifo_store(desc, FIFOST_TYPE_META_DATA |
+ FIFOST_AUX_TYPE0, data_move_size);
+ }
+ key_jump = append_jump(desc, JUMP_TYPE_LOCAL | CLASS_BOTH |
+ JUMP_TEST_ALL | JUMP_COND_SHRD);
+ /* Append split authentication key */
+ append_key_as_imm(desc, authdata->split_key,
+ authdata->split_key_pad_len,
+ authdata->split_key_len,
+ CLASS_2 | KEY_ENC | KEY_DEST_MDHA_SPLIT);
+ /* Append cipher key */
+ append_key_as_imm(desc, cipherdata->cipher_key,
+ cipherdata->cipher_key_len, cipherdata->cipher_key_len,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump);
+
+ /* Protocol specific operation */
+ append_operation(desc, OP_PCLID_DTLS | OP_TYPE_DECAP_PROTOCOL |
+ cipherdata->cipher_type);
+
+ *bufsize = desc_len(desc);
+}
+
+static void split_key_done(struct device *dev, u32 *desc, u32 err,
+ void *context)
+{
+ atomic_t *done = context;
+
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+ dev_err(dev, "%s\n", caam_jr_strstatus(tmp, err));
+ }
+ atomic_set(done, 1);
+}
+
+int generate_split_key(struct auth_params *auth_param,
+ struct dpaa_capwap_sec_info *secinfo)
+{
+ struct device *jrdev;
+ dma_addr_t dma_addr_in, dma_addr_out;
+ u32 *desc, timeout = 1000000;
+ atomic_t done;
+ int ret = 0;
+
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512
+ * Running digest size
+ */
+ const u8 mdpadlen[] = {16, 20, 32, 32, 64, 64};
+
+ jrdev = secinfo->jrdev;
+
+ desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+ if (!desc) {
+ dev_err(jrdev, "Allocate memory failed for split key desc\n");
+ return -ENOMEM;
+ }
+
+ auth_param->split_key_len = mdpadlen[(auth_param->auth_type &
+ OP_ALG_ALGSEL_SUBMASK) >>
+ OP_ALG_ALGSEL_SHIFT] * 2;
+ auth_param->split_key_pad_len = ALIGN(auth_param->split_key_len, 16);
+
+ dma_addr_in = dma_map_single(jrdev, auth_param->auth_key,
+ auth_param->auth_key_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, dma_addr_in)) {
+ dev_err(jrdev, "Unable to DMA map the input key address\n");
+ kfree(desc);
+ return -ENOMEM;
+ }
+
+ dma_addr_out = dma_map_single(jrdev, auth_param->split_key,
+ auth_param->split_key_pad_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, dma_addr_out)) {
+ dev_err(jrdev, "Unable to DMA map the output key address\n");
+ dma_unmap_single(jrdev, dma_addr_in, auth_param->auth_key_len,
+ DMA_TO_DEVICE);
+ kfree(desc);
+ return -ENOMEM;
+ }
+
+ init_job_desc(desc, 0);
+
+ append_key(desc, dma_addr_in, auth_param->auth_key_len,
+ CLASS_2 | KEY_DEST_CLASS_REG);
+
+ /* Sets MDHA up into an HMAC-INIT */
+ append_operation(desc, (OP_ALG_TYPE_CLASS2 << OP_ALG_TYPE_SHIFT) |
+ auth_param->auth_type | OP_ALG_AAI_HMAC |
+ OP_ALG_DECRYPT | OP_ALG_AS_INIT);
+
+ /* Do a FIFO_LOAD of zero, this will trigger the internal key expansion
+ * into both pads inside MDHA
+ */
+ append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
+
+ /* FIFO_STORE with the explicit split-key content store
+ * (0x26 output type)
+ */
+ append_fifo_store(desc, dma_addr_out, auth_param->split_key_len,
+ LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
+
+ atomic_set(&done, 0);
+ ret = caam_jr_enqueue(jrdev, desc, split_key_done, &done);
+
+ while (!atomic_read(&done) && --timeout) {
+ udelay(1);
+ cpu_relax();
+ }
+
+ if (timeout == 0)
+ dev_err(jrdev, "Timeout waiting for job ring to complete\n");
+
+ dma_unmap_single(jrdev, dma_addr_out, auth_param->split_key_pad_len,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(jrdev, dma_addr_in, auth_param->auth_key_len,
+ DMA_TO_DEVICE);
+ kfree(desc);
+ return ret;
+}
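Taken together, get_sec_info(), generate_split_key() and cnstr_shdsc_dtls_encap() let the tunnel setup path build a DTLS encapsulation shared descriptor. The kernel-side sketch below shows one possible call sequence; the function name, algorithm choice, key buffers and descriptor size are placeholders, and the real wiring lives in the domain/tunnel code of this patch.

/* Hypothetical call sequence for building a DTLS encap shared descriptor. */
#include <linux/kernel.h>
#include <linux/slab.h>
#include "dpaa_capwap_desc.h"

static int example_build_dtls_encap_desc(struct dpaa_capwap_sec_info *secinfo,
					 uint8_t *auth_key, uint32_t auth_key_len,
					 uint8_t *cipher_key, uint32_t cipher_key_len)
{
	struct auth_params auth = {
		.auth_type = OP_ALG_ALGSEL_SHA1,
		.auth_key = auth_key,
		.auth_key_len = auth_key_len,
	};
	struct cipher_params cipher = {
		.cipher_type = OP_PCL_DTLS_AES_128_CBC_SHA160,
		.cipher_key = cipher_key,
		.cipher_key_len = cipher_key_len,
	};
	uint32_t desc[64]; /* assumed large enough; CAAM descriptors max 64 words */
	uint16_t bufsize;
	int ret;

	auth.split_key = kzalloc(64, GFP_KERNEL | GFP_DMA); /* assumed max size */
	if (!auth.split_key)
		return -ENOMEM;

	/* Derive the MDHA split key on the SEC job ring. */
	ret = generate_split_key(&auth, secinfo);
	if (ret) {
		kfree(auth.split_key);
		return ret;
	}

	/* Construct the DTLS encapsulation shared descriptor. A real caller
	 * would also fill the DTLS PDB fields inside the descriptor.
	 */
	cnstr_shdsc_dtls_encap(desc, &bufsize, &cipher, &auth, 0);
	pr_info("DTLS encap shared descriptor: %u words\n", bufsize);

	kfree(auth.split_key);
	return 0;
}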
diff --git a/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_desc.h b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_desc.h
new file mode 100644
index 0000000..dd76acd
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_desc.h
@@ -0,0 +1,190 @@
+/* Copyright 2014 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_CAPWAP_DESC_H__
+#define __DPAA_CAPWAP_DESC_H__
+
+#include "pdb.h"
+#include "desc_constr.h"
+
+/* DPA CAPWAP Cipher Parameters */
+struct cipher_params {
+ uint16_t cipher_type; /* Algorithm type as defined by SEC driver */
+ uint8_t *cipher_key; /* Address to the encryption key */
+ uint32_t cipher_key_len; /* Length in bytes of the normal key */
+};
+
+/* DPA CAPWAP Authentication Parameters */
+struct auth_params {
+ uint32_t auth_type; /* Algorithm type as defined by SEC driver */
+ uint8_t *auth_key; /* Address to the normal key */
+ uint32_t auth_key_len; /* Length in bytes of the normal key */
+ uint8_t *split_key; /* Address to the generated split key */
+ uint32_t split_key_len; /* Length in bytes of the split key */
+ uint32_t split_key_pad_len;/* Length in bytes of the padded split key */
+};
+
+struct dpaa_capwap_sec_info {
+ int sec_era; /* SEC ERA information */
+ int sec_ver; /* SEC version information */
+ struct device *jrdev; /* Job ring device */
+};
+
+/* DPA CAPWAP cipher & authentication algorithm identifiers */
+struct capwap_alg_suite {
+ uint32_t auth_alg;
+ uint32_t cipher_alg;
+};
+
+#define CAPWAP_ALGS_ENTRY(auth, cipher) {\
+ .auth_alg = OP_ALG_ALGSEL_ ## auth,\
+ .cipher_alg = OP_PCL_DTLS_ ## cipher\
+}
+
+#define CAPWAP_ALGS {\
+ CAPWAP_ALGS_ENTRY(SHA1, DES_CBC_SHA_2), \
+ CAPWAP_ALGS_ENTRY(MD5, DES_CBC_MD5), \
+ CAPWAP_ALGS_ENTRY(MD5, 3DES_EDE_CBC_MD5), \
+ CAPWAP_ALGS_ENTRY(SHA1, 3DES_EDE_CBC_SHA160), \
+ CAPWAP_ALGS_ENTRY(SHA384, 3DES_EDE_CBC_SHA384), \
+ CAPWAP_ALGS_ENTRY(SHA224, 3DES_EDE_CBC_SHA224), \
+ CAPWAP_ALGS_ENTRY(SHA512, 3DES_EDE_CBC_SHA512), \
+ CAPWAP_ALGS_ENTRY(SHA256, 3DES_EDE_CBC_SHA256), \
+ CAPWAP_ALGS_ENTRY(SHA1, AES_256_CBC_SHA160), \
+ CAPWAP_ALGS_ENTRY(SHA384, AES_256_CBC_SHA384), \
+ CAPWAP_ALGS_ENTRY(SHA224, AES_256_CBC_SHA224), \
+ CAPWAP_ALGS_ENTRY(SHA512, AES_256_CBC_SHA512), \
+ CAPWAP_ALGS_ENTRY(SHA256, AES_256_CBC_SHA256), \
+ CAPWAP_ALGS_ENTRY(SHA1, AES_128_CBC_SHA160), \
+ CAPWAP_ALGS_ENTRY(SHA384, AES_128_CBC_SHA384), \
+ CAPWAP_ALGS_ENTRY(SHA224, AES_128_CBC_SHA224), \
+ CAPWAP_ALGS_ENTRY(SHA512, AES_128_CBC_SHA512), \
+ CAPWAP_ALGS_ENTRY(SHA256, AES_128_CBC_SHA256), \
+ CAPWAP_ALGS_ENTRY(SHA1, AES_192_CBC_SHA160), \
+ CAPWAP_ALGS_ENTRY(SHA384, AES_192_CBC_SHA384), \
+ CAPWAP_ALGS_ENTRY(SHA224, AES_192_CBC_SHA224), \
+ CAPWAP_ALGS_ENTRY(SHA512, AES_192_CBC_SHA512), \
+ CAPWAP_ALGS_ENTRY(SHA256, AES_192_CBC_SHA256) \
+}
+
+struct preheader_t {
+ union {
+ uint32_t word;
+ struct {
+ unsigned int rsls:1;
+ unsigned int rsvd1_15:15;
+ unsigned int rsvd16_24:9;
+ unsigned int idlen:7;
+ } field;
+ } __packed hi;
+
+ union {
+ uint32_t word;
+ struct {
+ unsigned int rsvd32_33:2;
+ unsigned int fsgt:1;
+ unsigned int lng:1;
+ unsigned int offset:2;
+ unsigned int abs:1;
+ unsigned int add_buf:1;
+ uint8_t pool_id;
+ uint16_t pool_buffer_size;
+ } field;
+ } __packed lo;
+} __packed;
+
+struct init_descriptor_header_t {
+ union {
+ uint32_t word;
+ struct {
+ unsigned int ctype:5;
+ unsigned int rsvd5_6:2;
+ unsigned int dnr:1;
+ unsigned int one:1;
+ unsigned int rsvd9:1;
+ unsigned int start_idx:6;
+ unsigned int zro:1;
+ unsigned int rsvd17_18:2;
+ unsigned int sc:1;
+ unsigned int propogate_dnr:1;
+ unsigned int rsvd21:1;
+ unsigned int share:2;
+ unsigned int rsvd24_25:2;
+ unsigned int desc_len:6;
+ } field;
+ } __packed command;
+} __packed;
+
+struct dtls_encap_descriptor_t {
+ struct preheader_t prehdr;
+ struct init_descriptor_header_t deschdr;
+ struct dtls_block_encap_pdb pdb;
+ /* DCL library will fill following info */
+ uint32_t data_move_cmd[3]; /* For Storing Data Move Cmd */
+ uint32_t jump_cmd; /* For Storing Jump Command */
+ uint32_t auth_key[13]; /* Max Space for storing auth Key */
+ uint32_t enc_key[7]; /* Max Space for storing enc Key */
+ uint32_t operation_cmd; /* For operation Command */
+} __packed;
+
+struct dtls_decap_descriptor_t {
+ struct preheader_t prehdr;
+ struct init_descriptor_header_t deschdr;
+ struct dtls_block_decap_pdb pdb;
+ /* DCL library will fill following info */
+ uint32_t data_move_cmd[3]; /* For Storing Data Move Cmd */
+ uint32_t jump_cmd; /* For Storing Jump Command */
+ uint32_t auth_key[13]; /* Max Space for storing auth Key */
+ uint32_t dec_key[7]; /* Max Space for storing dec Key */
+ uint32_t operation_cmd; /* For operation Command */
+} __packed;
+
+#define SEC_DEF_VER 40 /* like in P4080 */
+#define SEC_VER_5_3 53
+
+int get_sec_info(struct dpaa_capwap_sec_info *secinfo);
+
+int generate_split_key(struct auth_params *auth_param,
+ struct dpaa_capwap_sec_info *secinfo);
+
+void cnstr_shdsc_dtls_decap(uint32_t *desc,
+ uint16_t *bufsize,
+ struct cipher_params *cipherdata,
+ struct auth_params *authdata,
+ uint32_t data_move_size);
+
+void cnstr_shdsc_dtls_encap(uint32_t *desc,
+ uint16_t *bufsize,
+ struct cipher_params *cipherdata,
+ struct auth_params *authdata,
+ uint32_t data_move_size);
+
+#endif /* __DPAA_CAPWAP_DESC_H__ */
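The CAPWAP_ALGS table above enumerates the DTLS cipher/authentication suites supported by the offload. A small sketch of validating a requested pair against that table follows; the lookup helper is hypothetical, while the table itself is instantiated from this macro as "capwap_algs" in dpaa_capwap_domain.c.

/* Hypothetical helper: check whether a cipher/auth pair is one of the
 * suites listed in CAPWAP_ALGS.
 */
#include <linux/kernel.h>
#include "dpaa_capwap_desc.h"

static const struct capwap_alg_suite example_capwap_algs[] = CAPWAP_ALGS;

static bool capwap_alg_supported(uint32_t auth_alg, uint32_t cipher_alg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_capwap_algs); i++)
		if (example_capwap_algs[i].auth_alg == auth_alg &&
		    example_capwap_algs[i].cipher_alg == cipher_alg)
			return true;

	return false;
}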
diff --git a/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_domain.c b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_domain.c
new file mode 100644
index 0000000..308c612
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_domain.c
@@ -0,0 +1,1304 @@
+/* Copyright (c) 2014 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <compat.h>
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/of_platform.h>
+#include <linux/fsl_qman.h>
+
+#include "ncsw_ext.h"
+#include "fm_ext.h"
+#include "fm_port_ext.h"
+#include "fm_pcd_ext.h"
+#include "dpaa_capwap_domain.h"
+#include "dpaa_capwap_fq.h"
+
+#define GET_UPPER_TUNNEL_ID(tunnel_id) \
+ (tunnel_id + capwap_domain->max_num_of_tunnels)
+
+#define DTLS_ENCAP_OPTION_W_B (1 << 1) /* 0x02 */
+#define DTLS_ENCAP_OPTION_E_I (1 << 0) /* 0x01 */
+
+#define DTLS_DECAP_OPTION_NO_ARS (0 << 6)
+#define DTLS_DECAP_OPTION_32_ENTRY_ARS (1 << 6)
+#define DTLS_DECAP_OPTION_64_ENTRY_ARS (3 << 6)
+
+struct capwap_alg_suite capwap_algs[] = CAPWAP_ALGS;
+
+static int set_outbound_pcd(struct dpaa_capwap_domain *capwap_domain)
+{
+ t_FmPcdCcNodeParams *cc_node_param;
+ t_FmPcdCcTreeParams *cc_tree_param;
+ t_FmPcdNetEnvParams *net_env_params;
+ t_FmPortPcdParams *pcd_param;
+ t_FmPortPcdCcParams cc_param;
+ struct t_Port *out_op_port;
+ int i = 0;
+ int err = 0;
+
+ out_op_port = &capwap_domain->out_op_port;
+ /* Network Environment initialization */
+ net_env_params = kzalloc(sizeof(t_FmPcdNetEnvParams), GFP_KERNEL);
+ if (!net_env_params)
+ return -ENOMEM;
+ net_env_params->numOfDistinctionUnits = 0;
+ out_op_port->fmPcdInfo.h_NetEnv =
+ FM_PCD_NetEnvCharacteristicsSet(capwap_domain->h_fm_pcd,
+ net_env_params);
+ if (!out_op_port->fmPcdInfo.h_NetEnv) {
+ pr_err("FM_PCD_NetEnvCharacteristicsSet error\n");
+ kfree(net_env_params);
+ return -EINVAL;
+ }
+ kfree(net_env_params);
+
+ /* Nodes & Tree */
+ cc_node_param = kzalloc(sizeof(t_FmPcdCcNodeParams), GFP_KERNEL);
+ if (!cc_node_param)
+ return -ENOMEM;
+ cc_node_param->extractCcParams.type = e_FM_PCD_EXTRACT_NON_HDR;
+ cc_node_param->extractCcParams.extractNonHdr.src =
+ e_FM_PCD_EXTRACT_FROM_FLOW_ID;
+ cc_node_param->extractCcParams.extractNonHdr.action =
+ e_FM_PCD_ACTION_INDEXED_LOOKUP;
+ cc_node_param->extractCcParams.extractNonHdr.offset = 0;
+ cc_node_param->extractCcParams.extractNonHdr.size = 2;
+
+ cc_node_param->keysParams.numOfKeys =
+ capwap_domain->out_op_port.numOfTxQs;
+ cc_node_param->extractCcParams.extractNonHdr.icIndxMask =
+ (uint16_t)((cc_node_param->keysParams.numOfKeys - 1) << 4);
+ cc_node_param->keysParams.keySize = 2;
+ cc_node_param->keysParams.maxNumOfKeys =
+ cc_node_param->keysParams.numOfKeys;
+ cc_node_param->keysParams.statisticsMode =
+ e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME;
+
+ for (i = 0; i < cc_node_param->keysParams.numOfKeys; i++) {
+ cc_node_param->keysParams.keyParams[i].ccNextEngineParams.
+ nextEngine = e_FM_PCD_DONE;
+ cc_node_param->keysParams.keyParams[i].ccNextEngineParams.
+ params.enqueueParams.action = e_FM_PCD_DROP_FRAME;
+ cc_node_param->keysParams.keyParams[i].ccNextEngineParams.
+ statisticsEn = TRUE;
+ }
+
+ out_op_port->fmPcdInfo.h_CcNodes[0] =
+ FM_PCD_MatchTableSet(capwap_domain->h_fm_pcd,
+ cc_node_param);
+ if (!out_op_port->fmPcdInfo.h_CcNodes[0]) {
+ pr_err("FM_PCD_MatchTableSet failed\n");
+ kfree(cc_node_param);
+ return -EBUSY;
+ }
+ out_op_port->fmPcdInfo.h_CcNodesOrder[out_op_port->fmPcdInfo
+ .numOfCcNodes++] = out_op_port->fmPcdInfo.h_CcNodes[0];
+ kfree(cc_node_param);
+
+ /* define a tree with 1 group of size 1 only,
+ * i.e. all traffic goes to this node
+ */
+ cc_tree_param = kzalloc(sizeof(t_FmPcdCcTreeParams), GFP_KERNEL);
+ if (!cc_tree_param)
+ return -ENOMEM;
+ cc_tree_param->numOfGrps = 1;
+ cc_tree_param->h_NetEnv = out_op_port->fmPcdInfo.h_NetEnv;
+
+ cc_tree_param->ccGrpParams[0].numOfDistinctionUnits = 0;
+ cc_tree_param->ccGrpParams[0].nextEnginePerEntriesInGrp[0]
+ .nextEngine = e_FM_PCD_CC;
+ cc_tree_param->ccGrpParams[0].nextEnginePerEntriesInGrp[0]
+ .params.ccParams.h_CcNode = out_op_port->fmPcdInfo.h_CcNodes[0];
+
+ /* Build tree */
+ out_op_port->fmPcdInfo.h_CcTree =
+ FM_PCD_CcRootBuild(capwap_domain->h_fm_pcd, cc_tree_param);
+ if (!out_op_port->fmPcdInfo.h_CcTree) {
+ pr_err("FM_PCD_CcRootBuild failed\n");
+ kfree(cc_tree_param);
+ return -EBUSY;
+ }
+ kfree(cc_tree_param);
+
+ /* bind port to PCD properties */
+ /* initialize PCD parameters */
+ pcd_param = kzalloc(sizeof(t_FmPortPcdParams), GFP_KERNEL);
+ if (!pcd_param)
+ return -ENOMEM;
+ pcd_param->h_NetEnv = out_op_port->fmPcdInfo.h_NetEnv;
+ pcd_param->pcdSupport = e_FM_PORT_PCD_SUPPORT_CC_ONLY;
+ pcd_param->p_CcParams = &cc_param;
+
+ /* initialize coarse classification parameters */
+ memset(&cc_param, 0, sizeof(t_FmPortPcdCcParams));
+ cc_param.h_CcTree = out_op_port->fmPcdInfo.h_CcTree;
+
+ FM_PORT_Disable(capwap_domain->h_op_port);
+ err = FM_PORT_SetPCD(capwap_domain->h_op_port, pcd_param);
+ FM_PORT_Enable(capwap_domain->h_op_port);
+ kfree(pcd_param);
+
+ capwap_domain->h_flow_id_table = out_op_port->fmPcdInfo.h_CcNodes[0];
+
+ return err;
+}
+
+static int calc_key_size(bool ipv6,
+ uint32_t valid_key_fields,
+ uint8_t *key_size)
+{
+ uint32_t i, field_mask = 0;
+
+ for (i = 0; i < sizeof(uint32_t) * BITS_PER_BYTE; i++) {
+ field_mask = 1U << i;
+ switch (valid_key_fields & field_mask) {
+ case DPAA_CAPWAP_DOMAIN_KEY_FIELD_SIP:
+ case DPAA_CAPWAP_DOMAIN_KEY_FIELD_DIP:
+ if (ipv6)
+ *key_size += NET_HEADER_FIELD_IPv6_ADDR_SIZE;
+ else
+ *key_size += NET_HEADER_FIELD_IPv4_ADDR_SIZE;
+ break;
+ case DPAA_CAPWAP_DOMAIN_KEY_FIELD_PROTO:/* same size for ipv6 */
+ *key_size += NET_HEADER_FIELD_IPv4_PROTO_SIZE;
+ break;
+ case DPAA_CAPWAP_DOMAIN_KEY_FIELD_SPORT:
+ case DPAA_CAPWAP_DOMAIN_KEY_FIELD_DPORT:
+ *key_size += NET_HEADER_FIELD_UDP_PORT_SIZE;
+ break;
+ case DPAA_CAPWAP_DOMAIN_KEY_FIELD_PREAMBLE:
+ *key_size += 1;
+ break;
+ case DPAA_CAPWAP_DOMAIN_KEY_FIELD_DTLS_TYPE:
+ *key_size += 1;
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static const struct of_device_id dpa_capwap_match[] = {
+ {
+ .compatible = "fsl,dpa-ethernet-shared"
+ },
+ {}
+};
+
+static struct net_device *get_net_dev(void)
+{
+ struct device_node *capwap_eth_node;
+ struct platform_device *capwap_eth_dev;
+ struct net_device *net_dev;
+
+ capwap_eth_node = of_find_matching_node(NULL, &dpa_capwap_match[0]);
+ if (!capwap_eth_node) {
+ pr_err("Couln't find the device_node CAPWAP Ethernet, check the device tree\n");
+ return NULL;
+ }
+
+ capwap_eth_dev = of_find_device_by_node(capwap_eth_node);
+ if (!capwap_eth_dev) {
+ pr_err("CAPWAP Ethernet of_device null\n");
+ return NULL;
+ }
+
+ net_dev = dev_get_drvdata(&capwap_eth_dev->dev);
+
+ return net_dev;
+
+}
+
+struct dpaa_capwap_domain *dpaa_capwap_domain_config(
+ struct dpaa_capwap_domain_params *new_capwap_domain)
+{
+ struct dpaa_capwap_domain *capwap_domain = NULL;
+ int ret = 0;
+
+ pr_info("dpaa_capwap_domain_config\n");
+
+ capwap_domain = kzalloc(sizeof(struct dpaa_capwap_domain), GFP_KERNEL);
+ if (!capwap_domain) {
+ pr_err("no memory for DPAA CAPWAP DOMAIN\n");
+ return NULL;
+ }
+
+ capwap_domain->h_fm_pcd = new_capwap_domain->h_fm_pcd;
+ capwap_domain->max_num_of_tunnels =
+ new_capwap_domain->max_num_of_tunnels;
+ capwap_domain->support_ipv6 = new_capwap_domain->support_ipv6;
+
+ capwap_domain->post_dec_op_port.fm_id =
+ new_capwap_domain->inbound_op.fm_id;
+ capwap_domain->post_dec_op_port.port_id =
+ new_capwap_domain->inbound_op.port_id;
+ capwap_domain->out_op_port.fm_id =
+ new_capwap_domain->outbound_op.fm_id;
+ capwap_domain->out_op_port.port_id =
+ new_capwap_domain->outbound_op.port_id;
+ capwap_domain->h_op_port = new_capwap_domain->outbound_op.port_handle;
+ capwap_domain->h_em_table =
+ new_capwap_domain->inbound_pre_params.h_Table;
+ capwap_domain->key_fields =
+ new_capwap_domain->inbound_pre_params.key_fields;
+ capwap_domain->mask_fields =
+ new_capwap_domain->inbound_pre_params.mask_fields;
+
+ calc_key_size(capwap_domain->support_ipv6,
+ capwap_domain->key_fields, &capwap_domain->key_size);
+ if (capwap_domain->key_size > TABLE_KEY_MAX_SIZE) {
+ kfree(capwap_domain);
+ pr_err("hash key size exceeded %d bytes\n", TABLE_KEY_MAX_SIZE);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&capwap_domain->in_tunnel_list);
+ INIT_LIST_HEAD(&capwap_domain->out_tunnel_list);
+
+ capwap_domain->post_dec_op_port.numOfTxQs =
+ capwap_domain->max_num_of_tunnels;
+ capwap_domain->out_op_port.numOfTxQs =
+ capwap_domain->max_num_of_tunnels*2;
+
+ ret = get_sec_info(&capwap_domain->secinfo);
+ if (ret) {
+ kfree(capwap_domain);
+ return NULL;
+ }
+
+ pr_info("Capwap-Domain configuration done\n");
+
+ return capwap_domain;
+}
+
+static void dump_fq_ids(const struct dpaa_capwap_domain *domain)
+{
+ pr_info("***********************outbound***********************\n");
+ pr_info("DTLS-Control: Core--(0x%x)-->OP--(0x%x)-->Sec--(0x%x)-->OP--(0x%x)-->Tx\n",
+ domain->fqs->outbound_core_tx_fqs.fqid_base + 0,
+ domain->fqs->outbound_op_tx_fqs.fqid_base + 0,
+ domain->fqs->outbound_sec_to_op_fqs.fqid_base + 0,
+ domain->fqs->outbound_op_tx_fqs.fqid_base + 4 + 0);
+ pr_info("DTLS-Data: Core--(0x%x)-->OP--(0x%x)-->Sec--(0x%x)-->OP--(0x%x)-->Tx\n",
+ domain->fqs->outbound_core_tx_fqs.fqid_base + 1,
+ domain->fqs->outbound_op_tx_fqs.fqid_base + 1,
+ domain->fqs->outbound_sec_to_op_fqs.fqid_base + 1,
+ domain->fqs->outbound_op_tx_fqs.fqid_base + 4 + 1);
+ pr_info("N-DTLS-Control:Core--(0x%x)-->OP--(0x%x)-->OP--(0x%x)-->Tx\n",
+ domain->fqs->outbound_core_tx_fqs.fqid_base + 2,
+ domain->fqs->outbound_op_tx_fqs.fqid_base + 2,
+ domain->fqs->outbound_op_tx_fqs.fqid_base + 4 + 2);
+ pr_info("N-DTLS-Data: Core--(0x%x)-->OP--(0x%x)-->OP--(0x%x)-->Tx\n",
+ domain->fqs->outbound_core_tx_fqs.fqid_base + 3,
+ domain->fqs->outbound_op_tx_fqs.fqid_base + 3,
+ domain->fqs->outbound_op_tx_fqs.fqid_base + 4 + 3);
+ pr_info("***********************inbound***********************\n");
+ pr_info("DTLS-Control: Rx--(0x%x)-->Sec--(0x%x)-->OP--(0x%x)-->Core\n",
+ domain->fqs->inbound_eth_rx_fqs.fqid_base + 0,
+ domain->fqs->inbound_sec_to_op_fqs.fqid_base + 0,
+ domain->fqs->inbound_core_rx_fqs.fqid_base + 0);
+ pr_info("DTLS-Data: Rx--(0x%x)-->Sec--(0x%x)-->OP--(0x%x)-->Core\n",
+ domain->fqs->inbound_eth_rx_fqs.fqid_base + 1,
+ domain->fqs->inbound_sec_to_op_fqs.fqid_base + 1,
+ domain->fqs->inbound_core_rx_fqs.fqid_base + 1);
+ pr_info("N-DTLS-Control:Rx--(0x%x)-->OP--(0x%x)-->Core\n",
+ domain->fqs->inbound_eth_rx_fqs.fqid_base + 2,
+ domain->fqs->inbound_core_rx_fqs.fqid_base + 2);
+ pr_info("N-DTLS-Data: Rx--(0x%x)-->OP--(0x%x)-->Core\n",
+ domain->fqs->inbound_eth_rx_fqs.fqid_base + 3,
+ domain->fqs->inbound_core_rx_fqs.fqid_base + 3);
+ pr_info("N-CAPWAP: 0x%x\n",
+ domain->fqs->inbound_eth_rx_fqs.fqid_base + 4);
+}
+
+int dpaa_capwap_domain_init(struct dpaa_capwap_domain *capwap_domain)
+{
+ struct dpaa_capwap_tunnel *p_tunnel;
+ int err = 0;
+ uint32_t i;
+ struct net_device *net_dev = NULL;
+
+ if (!capwap_domain) {
+ pr_err("failed for %s\n", __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < capwap_domain->max_num_of_tunnels; i++) {
+ p_tunnel =
+ kzalloc(sizeof(struct dpaa_capwap_tunnel), GFP_KERNEL);
+ if (!p_tunnel)
+ goto no_memory;
+
+ p_tunnel->auth_data.auth_key = kzalloc(DTLS_KEY_MAX_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!p_tunnel->auth_data.auth_key)
+ goto no_memory;
+
+ p_tunnel->auth_data.split_key = kzalloc(DTLS_KEY_MAX_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!p_tunnel->auth_data.split_key)
+ goto no_memory;
+
+ p_tunnel->cipher_data.cipher_key = kzalloc(DTLS_KEY_MAX_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!p_tunnel->cipher_data.cipher_key)
+ goto no_memory;
+
+ p_tunnel->p_key = kzalloc(capwap_domain->key_size, GFP_KERNEL);
+ if (!p_tunnel->p_key)
+ goto no_memory;
+
+ p_tunnel->p_mask = kzalloc(capwap_domain->key_size, GFP_KERNEL);
+ if (!p_tunnel->p_mask)
+ goto no_memory;
+
+ p_tunnel->tunnel_dir = e_DPAA_CAPWAP_DOMAIN_DIR_INBOUND;
+ p_tunnel->dpaa_capwap_domain = capwap_domain;
+ p_tunnel->tunnel_id = i;
+ INIT_LIST_HEAD(&p_tunnel->fq_chain_head);
+ enqueue_tunnel_obj(&capwap_domain->in_tunnel_list, p_tunnel);
+
+ p_tunnel =
+ kzalloc(sizeof(struct dpaa_capwap_tunnel), GFP_KERNEL);
+ if (!p_tunnel)
+ goto no_memory;
+
+ p_tunnel->auth_data.auth_key = kzalloc(DTLS_KEY_MAX_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!p_tunnel->auth_data.auth_key)
+ goto no_memory;
+
+ p_tunnel->auth_data.split_key = kzalloc(DTLS_KEY_MAX_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!p_tunnel->auth_data.split_key)
+ goto no_memory;
+
+ p_tunnel->cipher_data.cipher_key = kzalloc(DTLS_KEY_MAX_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!p_tunnel->cipher_data.cipher_key)
+ goto no_memory;
+
+ p_tunnel->tunnel_dir = e_DPAA_CAPWAP_DOMAIN_DIR_OUTBOUND;
+ p_tunnel->dpaa_capwap_domain = capwap_domain;
+ p_tunnel->tunnel_id = i;
+ INIT_LIST_HEAD(&p_tunnel->fq_chain_head);
+ enqueue_tunnel_obj(&capwap_domain->out_tunnel_list, p_tunnel);
+ }
+
+ err = set_outbound_pcd(capwap_domain);
+ if (err) {
+ pr_err("set_outbound_pcd error:%d\n", err);
+ return err;
+ }
+
+ net_dev = get_net_dev();
+ if (net_dev == NULL) {
+ pr_err("No CAPWAP Ethernet Device\n");
+ return -ENODEV;
+ }
+
+ capwap_domain->net_dev = net_dev;
+ capwap_domain->bpid = get_capwap_bpid(net_dev);
+
+ err = op_init(&capwap_domain->post_dec_op_port, net_dev);
+ if (err) {
+ pr_err("outbound OP init failed\n");
+ return err;
+ }
+ err = op_init(&capwap_domain->out_op_port, net_dev);
+ if (err) {
+ pr_err("inbound OP init failed\n");
+ return err;
+ }
+
+ capwap_domain->fqs = get_domain_fqs();
+ if (capwap_domain->fqs == NULL) {
+ pr_err("Alloc fqs for capwap domain failed\n");
+ return -ENOMEM;
+ }
+ err = capwap_fq_pre_init(capwap_domain);
+ if (err) {
+ pr_err("pre-init fq for capwap domain failed\n");
+ return err;
+ }
+ dump_fq_ids(capwap_domain);
+
+ err = capwap_tunnel_drv_init(capwap_domain);
+ if (err) {
+ pr_err("Capwap Tunnel Driver init failed\n");
+ return err;
+ }
+
+ err = capwap_br_init(capwap_domain);
+ if (err) {
+ pr_err("Capwap Bridge Driver init failed\n");
+ return err;
+ }
+ return 0;
+
+no_memory:
+ if (p_tunnel) {
+ kfree(p_tunnel->auth_data.auth_key);
+ kfree(p_tunnel->auth_data.split_key);
+ kfree(p_tunnel->cipher_data.cipher_key);
+ kfree(p_tunnel->p_key);
+ kfree(p_tunnel->p_mask);
+ kfree(p_tunnel);
+ }
+ p_tunnel = dequeue_tunnel_obj(&capwap_domain->in_tunnel_list);
+ while (p_tunnel) {
+ kfree(p_tunnel->auth_data.auth_key);
+ kfree(p_tunnel->auth_data.split_key);
+ kfree(p_tunnel->cipher_data.cipher_key);
+ kfree(p_tunnel->p_key);
+ kfree(p_tunnel->p_mask);
+ kfree(p_tunnel);
+ p_tunnel = dequeue_tunnel_obj(&capwap_domain->in_tunnel_list);
+ }
+ p_tunnel = dequeue_tunnel_obj(&capwap_domain->out_tunnel_list);
+ while (p_tunnel) {
+ kfree(p_tunnel->auth_data.auth_key);
+ kfree(p_tunnel->auth_data.split_key);
+ kfree(p_tunnel->cipher_data.cipher_key);
+ kfree(p_tunnel);
+ p_tunnel = dequeue_tunnel_obj(&capwap_domain->out_tunnel_list);
+ }
+ pr_err("no memory for malloc in %s\n", __func__);
+ return -ENOMEM;
+
+}
+
+uint16_t get_flow_index(bool is_dtls, bool is_control_tunnel)
+{
+ if (is_dtls && is_control_tunnel)
+ return 0;
+ else if (is_dtls && !is_control_tunnel)
+ return 1;
+ else if (!is_dtls && is_control_tunnel)
+ return 2;
+ else
+ return 3;
+}
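+
+/* For reference, the flow-index mapping returned above is:
+ *   DTLS control -> 0, DTLS data -> 1,
+ *   non-DTLS control -> 2, non-DTLS data -> 3
+ * These values are used throughout this file as per-flow offsets on top
+ * of the FQID bases (see dump_fq_ids()).
+ */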
+
+int add_in_tunnel(struct dpaa_capwap_domain *capwap_domain,
+ struct dpaa_capwap_tunnel *p_tunnel,
+ struct dpaa_capwap_domain_tunnel_in_params *in_tunnel_params)
+{
+ t_FmPcdManipParams fm_pcd_manip_params;
+ t_FmPcdCcKeyParams key_params;
+ uint8_t match_key_size = 0;
+ int err = 0;
+ struct auth_params *auth;
+ struct cipher_params *cipher;
+ struct dtls_block_decap_pdb *pdb;
+ struct dtls_decap_descriptor_t *preheader_initdesc;
+ struct qman_fq *fq;
+ uint16_t desc_len;
+ unsigned char *buff_start = NULL;
+ u64 context_a = 0;
+ uint32_t context_b = 0;
+ uint16_t channel;
+ uint16_t flow_index;
+ dma_addr_t dma_addr;
+ struct qman_fq_chain *fq_node;
+
+ flow_index = get_flow_index(in_tunnel_params->dtls,
+ in_tunnel_params->is_control);
+
+ /* Configure of the DTLS decryption parameters */
+ if (in_tunnel_params->dtls) {
+ preheader_initdesc =
+ kzalloc(sizeof(struct dtls_decap_descriptor_t),
+ GFP_KERNEL);
+ if (preheader_initdesc == NULL) {
+ pr_err("error: %s: No More Buffers left for Descriptor\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ desc_len = (sizeof(struct dtls_decap_descriptor_t) -
+ sizeof(struct preheader_t)) / sizeof(uint32_t);
+
+ buff_start = (unsigned char *)preheader_initdesc +
+ sizeof(struct preheader_t);
+
+ pdb = &preheader_initdesc->pdb;
+
+ if (in_tunnel_params->dtls_params.wbIv)
+ pdb->options |= DTLS_ENCAP_OPTION_W_B;
+
+ switch (in_tunnel_params->dtls_params.arw) {
+ case e_DTLS_ARS_32:
+ pdb->options |= DTLS_DECAP_OPTION_32_ENTRY_ARS;
+ break;
+ case e_DTLS_ARS_64:
+ pdb->options |= DTLS_DECAP_OPTION_64_ENTRY_ARS;
+ break;
+ default:
+ break;
+ }
+
+ pdb->epoch = in_tunnel_params->dtls_params.epoch;
+ memcpy(pdb->seq_num, &in_tunnel_params->dtls_params.seq_num, 6);
+ memcpy(pdb->iv, in_tunnel_params->dtls_params.p_Iv, 16);
+
+ auth = &p_tunnel->auth_data;
+ cipher = &p_tunnel->cipher_data;
+
+ if ((in_tunnel_params->dtls_params.cipher_key_len / 8) >
+ DTLS_KEY_MAX_SIZE) {
+ pr_err("key size exceeded %d bytes", DTLS_KEY_MAX_SIZE);
+ kfree(preheader_initdesc);
+ return -EINVAL;
+ }
+
+ auth->auth_key_len =
+ in_tunnel_params->dtls_params.auth_key_len / 8;
+ memcpy(auth->auth_key, in_tunnel_params->dtls_params.auth_key,
+ auth->auth_key_len);
+ cipher->cipher_key_len =
+ in_tunnel_params->dtls_params.cipher_key_len / 8;
+ memcpy(cipher->cipher_key,
+ in_tunnel_params->dtls_params.cipher_key,
+ cipher->cipher_key_len);
+ auth->auth_type =
+ capwap_algs[in_tunnel_params->dtls_params.alg_type]
+ .auth_alg;
+ cipher->cipher_type =
+ capwap_algs[in_tunnel_params->dtls_params.alg_type]
+ .cipher_alg;
+
+ err = generate_split_key(auth, &capwap_domain->secinfo);
+ if (err) {
+ pr_err("error: %s: generate splitkey error\n",
+ __func__);
+ kfree(preheader_initdesc);
+ return err;
+ }
+
+ cnstr_shdsc_dtls_decap((uint32_t *) buff_start, &desc_len,
+ cipher, auth, 4);
+
+ preheader_initdesc->prehdr.lo.field.pool_id =
+ capwap_domain->bpid;
+ preheader_initdesc->prehdr.lo.field.offset = 1;
+ preheader_initdesc->prehdr.hi.field.idlen = desc_len;
+
+ p_tunnel->sec_desc = (t_Handle)preheader_initdesc;
+
+ dma_addr = dma_map_single(capwap_domain->secinfo.jrdev,
+ p_tunnel->sec_desc,
+ sizeof(struct preheader_t) + desc_len * 4,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(capwap_domain->secinfo.jrdev, dma_addr)) {
+ pr_err("Unable to DMA map the dtls decap-descriptor address\n");
+ kfree(preheader_initdesc);
+ return -ENOMEM;
+ }
+
+ /* Init FQ from Rx port to SEC */
+ fq = (struct qman_fq *)
+ capwap_domain->fqs->inbound_eth_rx_fqs.fq_base;
+ fq[flow_index].fqid =
+ capwap_domain->fqs->inbound_eth_rx_fqs.fqid_base +
+ flow_index;
+ channel = qm_channel_caam;
+ context_a = (u64)dma_addr;
+ context_b = capwap_domain->fqs->inbound_sec_to_op_fqs.fqid_base
+ + flow_index;
+ err = capwap_fq_tx_init(&fq[flow_index], channel, context_a,
+ context_b);
+ if (err)
+ goto error;
+
+ fq_node = kzalloc(sizeof(struct qman_fq_chain), GFP_KERNEL);
+ if (fq_node == NULL) {
+ err = -ENOMEM;
+ goto error;
+ }
+ fq_node->fq = &fq[flow_index];
+ list_add_tail(&fq_node->list, &p_tunnel->fq_chain_head);
+ }
+
+ /* Pre SEC Section */
+ memset(&key_params, 0, sizeof(t_FmPcdCcKeyParams));
+ key_params.p_Key = p_tunnel->p_key;
+ key_params.p_Mask = p_tunnel->p_mask;
+
+ memset(key_params.p_Key, 0, capwap_domain->key_size);
+ memset(key_params.p_Mask, 0xFF, capwap_domain->key_size);
+
+ if (capwap_domain->key_fields & DPAA_CAPWAP_DOMAIN_KEY_FIELD_SIP) {
+ memcpy(&key_params.p_Key[match_key_size],
+ (in_tunnel_params->sip.ipv6) ?
+ in_tunnel_params->sip.u.ipv6_addr :
+ (uint8_t *)&in_tunnel_params->sip.u.ipv4_addr,
+ (in_tunnel_params->sip.ipv6) ?
+ NET_HEADER_FIELD_IPv6_ADDR_SIZE :
+ NET_HEADER_FIELD_IPv4_ADDR_SIZE);
+
+ memcpy(&key_params.p_Mask[match_key_size],
+ (in_tunnel_params->sip_mask.ipv6) ?
+ in_tunnel_params->sip_mask.u.ipv6_mask :
+ (uint8_t *)&in_tunnel_params->sip_mask.u.ipv4_mask,
+ (in_tunnel_params->sip_mask.ipv6) ?
+ NET_HEADER_FIELD_IPv6_ADDR_SIZE :
+ NET_HEADER_FIELD_IPv4_ADDR_SIZE);
+
+ match_key_size += (in_tunnel_params->sip.ipv6) ?
+ NET_HEADER_FIELD_IPv6_ADDR_SIZE :
+ NET_HEADER_FIELD_IPv4_ADDR_SIZE;
+ }
+ if (capwap_domain->key_fields & DPAA_CAPWAP_DOMAIN_KEY_FIELD_DIP) {
+ memcpy(&key_params.p_Key[match_key_size],
+ (in_tunnel_params->dip.ipv6) ?
+ in_tunnel_params->dip.u.ipv6_addr :
+ (uint8_t *)&in_tunnel_params->dip.u.ipv4_addr,
+ (in_tunnel_params->dip.ipv6) ?
+ NET_HEADER_FIELD_IPv6_ADDR_SIZE :
+ NET_HEADER_FIELD_IPv4_ADDR_SIZE);
+
+ memcpy(&key_params.p_Mask[match_key_size],
+ (in_tunnel_params->dip_mask.ipv6) ?
+ in_tunnel_params->dip_mask.u.ipv6_mask :
+ (uint8_t *)&in_tunnel_params->dip_mask.u.ipv4_mask,
+ (in_tunnel_params->dip_mask.ipv6) ?
+ NET_HEADER_FIELD_IPv6_ADDR_SIZE :
+ NET_HEADER_FIELD_IPv4_ADDR_SIZE);
+
+ match_key_size += (in_tunnel_params->dip.ipv6)
+ ? NET_HEADER_FIELD_IPv6_ADDR_SIZE :
+ NET_HEADER_FIELD_IPv4_ADDR_SIZE;
+ }
+
+ if (capwap_domain->key_fields & DPAA_CAPWAP_DOMAIN_KEY_FIELD_PROTO) {
+ key_params.p_Key[match_key_size] = IPPROTO_UDP; /* UDP */
+ match_key_size += NET_HEADER_FIELD_IPv4_PROTO_SIZE;
+ }
+
+ if (capwap_domain->key_fields & DPAA_CAPWAP_DOMAIN_KEY_FIELD_SPORT) {
+ memcpy(&key_params.p_Key[match_key_size],
+ &in_tunnel_params->src_port,
+ NET_HEADER_FIELD_UDP_PORT_SIZE);
+ match_key_size += NET_HEADER_FIELD_UDP_PORT_SIZE;
+ }
+
+ if (capwap_domain->key_fields & DPAA_CAPWAP_DOMAIN_KEY_FIELD_DPORT) {
+ memcpy(&key_params.p_Key[match_key_size],
+ &in_tunnel_params->dst_port,
+ NET_HEADER_FIELD_UDP_PORT_SIZE);
+ match_key_size += NET_HEADER_FIELD_UDP_PORT_SIZE;
+ }
+
+ if (capwap_domain->key_fields & DPAA_CAPWAP_DOMAIN_KEY_FIELD_PREAMBLE) {
+ key_params.p_Key[match_key_size] = (in_tunnel_params->dtls) ?
+ 1 : 0; /* DTLS or not */
+ match_key_size += 1;
+ }
+
+ if ((in_tunnel_params->dtls) && (capwap_domain->key_fields &
+ DPAA_CAPWAP_DOMAIN_KEY_FIELD_DTLS_TYPE)) {
+ key_params.p_Key[match_key_size] =
+ in_tunnel_params->dtls_params.type;
+ match_key_size += 1;
+ }
+
+ memset(&key_params.p_Mask[match_key_size], 0,
+ capwap_domain->key_size - match_key_size);
+
+ memset(&fm_pcd_manip_params, 0, sizeof(fm_pcd_manip_params));
+ fm_pcd_manip_params.type = e_FM_PCD_MANIP_HDR;
+ fm_pcd_manip_params.u.hdr.dontParseAfterManip = TRUE;
+ fm_pcd_manip_params.u.hdr.rmv = TRUE;
+ fm_pcd_manip_params.u.hdr.rmvParams.type = e_FM_PCD_MANIP_RMV_BY_HDR;
+ fm_pcd_manip_params.u.hdr.rmvParams.u.byHdr.type =
+ e_FM_PCD_MANIP_RMV_BY_HDR_FROM_START;
+ fm_pcd_manip_params.u.hdr.rmvParams.u.byHdr.u.hdrInfo.hdr =
+ (in_tunnel_params->dtls) ? HEADER_TYPE_CAPWAP_DTLS :
+ HEADER_TYPE_CAPWAP;
+ p_tunnel->h_hm_till_manip = FM_PCD_ManipNodeSet(capwap_domain->h_fm_pcd,
+ &fm_pcd_manip_params);
+ if (!p_tunnel->h_hm_till_manip) {
+ pr_err("FM_PCD_ManipNodeSet failed");
+ err = -EINVAL;
+ goto error;
+ }
+
+ key_params.ccNextEngineParams.nextEngine = e_FM_PCD_DONE;
+ key_params.ccNextEngineParams.params.enqueueParams.action =
+ e_FM_PCD_ENQ_FRAME;
+ key_params.ccNextEngineParams.statisticsEn = TRUE;
+ key_params.ccNextEngineParams.params.enqueueParams.overrideFqid = TRUE;
+ key_params.ccNextEngineParams.params.enqueueParams.newFqid =
+ capwap_domain->fqs->inbound_eth_rx_fqs.fqid_base + flow_index;
+ key_params.ccNextEngineParams.h_Manip = p_tunnel->h_hm_till_manip;
+
+ p_tunnel->key_index = flow_index;
+
+ err = FM_PCD_MatchTableAddKey(capwap_domain->h_em_table, flow_index,
+ capwap_domain->key_size, &key_params);
+ if (err != E_OK)
+ goto error;
+
+ return E_OK;
+error:
+ if (in_tunnel_params->dtls) {
+ qman_destroy_fq(&fq[flow_index], 0);
+ dma_unmap_single(capwap_domain->secinfo.jrdev,
+ dma_addr,
+ sizeof(struct preheader_t) + desc_len * 4,
+ DMA_TO_DEVICE);
+ kfree(preheader_initdesc);
+ }
+ return err;
+}
+
+static int remove_in_tunnel(struct dpaa_capwap_tunnel *p_tunnel)
+{
+ struct dpaa_capwap_domain *capwap_domain;
+ struct qman_fq_chain *fq_node, *tmp;
+ int err;
+
+ capwap_domain = p_tunnel->dpaa_capwap_domain;
+ /* Take care of ingress side */
+ /* First, remove the match-key for this flow */
+ err = FM_PCD_MatchTableRemoveKey(capwap_domain->h_em_table,
+ p_tunnel->key_index);
+ if (err != E_OK)
+ return err;
+
+ if (p_tunnel->h_hm_till_manip) {
+ FM_PCD_ManipNodeDelete(p_tunnel->h_hm_till_manip);
+ p_tunnel->h_hm_till_manip = NULL;
+ }
+
+ list_for_each_entry_safe(fq_node, tmp, &p_tunnel->fq_chain_head, list) {
+ teardown_fq(fq_node->fq);
+ list_del(&fq_node->list);
+ }
+
+ return 0;
+}
+
+int add_out_tunnel(struct dpaa_capwap_domain *capwap_domain,
+ struct dpaa_capwap_tunnel *p_tunnel,
+ struct dpaa_capwap_domain_tunnel_out_params *out_tunnel_params)
+{
+ t_FmPcdCcNextEngineParams *fm_pcd_cc_next_engine_params = NULL;
+ t_FmPcdManipParams *fm_pcd_manip_params = NULL;
+ t_FmPcdCcNodeParams *cc_node_param = NULL;
+ struct t_Port *out_op_port;
+ uint32_t fqid;
+ int err = 0;
+ struct dtls_block_encap_pdb *pdb;
+ struct auth_params *auth;
+ struct cipher_params *cipher;
+ struct dtls_encap_descriptor_t *preheader_initdesc;
+ struct qman_fq *fq;
+ uint16_t desc_len;
+ unsigned char *buff_start = NULL;
+ u64 context_a = 0;
+ uint32_t context_b = 0;
+ uint16_t channel;
+ uint16_t flow_index;
+ dma_addr_t dma_addr;
+ struct qman_fq_chain *fq_node;
+
+ if (!capwap_domain || !p_tunnel)
+ return -EINVAL;
+
+ if (!out_tunnel_params->p_ether_header ||
+ !out_tunnel_params->eth_header_size ||
+ !out_tunnel_params->p_ip_header ||
+ !out_tunnel_params->ip_header_size ||
+ !out_tunnel_params->p_udp_header) {
+ pr_err("must provide ETH+IP+UDP headers and sizes\n");
+ return -EINVAL;
+ }
+
+ if (!out_tunnel_params->p_NextEngineParams) {
+ pr_err("must provide next-engine-params\n");
+ return -EINVAL;
+ }
+
+ if (out_tunnel_params->p_NextEngineParams->h_Manip) {
+ pr_err("cannot provide next-engine-params with pointer to manipulation\n");
+ return -EINVAL;
+ }
+
+ flow_index = get_flow_index(out_tunnel_params->dtls,
+ out_tunnel_params->is_control);
+
+ /* Post SEC Section */
+ fm_pcd_manip_params = kzalloc(sizeof(t_FmPcdManipParams), GFP_KERNEL);
+ if (fm_pcd_manip_params == NULL)
+ return -ENOMEM;
+ fm_pcd_manip_params->type = e_FM_PCD_MANIP_HDR;
+ fm_pcd_manip_params->u.hdr.dontParseAfterManip = TRUE;
+ fm_pcd_manip_params->u.hdr.insrt = TRUE;
+ fm_pcd_manip_params->u.hdr.insrtParams.type =
+ e_FM_PCD_MANIP_INSRT_GENERIC;
+ fm_pcd_manip_params->u.hdr.insrtParams.u.generic.offset = 0;
+ fm_pcd_manip_params->u.hdr.insrtParams.u.generic.size =
+ out_tunnel_params->eth_header_size;
+ fm_pcd_manip_params->u.hdr.insrtParams.u.generic.p_Data =
+ out_tunnel_params->p_ether_header;
+ p_tunnel->h_hm_l2 = FM_PCD_ManipNodeSet(capwap_domain->h_fm_pcd,
+ fm_pcd_manip_params);
+ if (!p_tunnel->h_hm_l2) {
+ pr_err("FM_PCD_ManipNodeSet failed in add_out_tunnel: Hml2\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ memset(fm_pcd_manip_params, 0, sizeof(t_FmPcdManipParams));
+ fm_pcd_manip_params->type = e_FM_PCD_MANIP_HDR;
+ fm_pcd_manip_params->u.hdr.dontParseAfterManip = TRUE;
+ fm_pcd_manip_params->u.hdr.insrt = TRUE;
+ fm_pcd_manip_params->u.hdr.insrtParams.type =
+ e_FM_PCD_MANIP_INSRT_BY_HDR;
+ fm_pcd_manip_params->u.hdr.insrtParams.u.byHdr.type =
+ e_FM_PCD_MANIP_INSRT_BY_HDR_IP;
+ fm_pcd_manip_params->u.hdr.insrtParams.u.byHdr.u.ipParams.calcL4Checksum
+ = TRUE;
+ fm_pcd_manip_params->u.hdr.insrtParams.u.byHdr.u.ipParams.id =
+ out_tunnel_params->initial_id;
+ fm_pcd_manip_params->u.hdr.insrtParams.u.byHdr.u.ipParams.mappingMode =
+ e_FM_PCD_MANIP_HDR_QOS_MAPPING_NONE;
+ fm_pcd_manip_params->u.hdr.insrtParams.u.byHdr.u.ipParams.lastPidOffset
+ = out_tunnel_params->last_pid_offset;
+ fm_pcd_manip_params->u.hdr.insrtParams.u.byHdr.u.ipParams.insrt.size =
+ out_tunnel_params->ip_header_size;
+ fm_pcd_manip_params->u.hdr.insrtParams.u.byHdr.u.ipParams.insrt.p_Data =
+ out_tunnel_params->p_ip_header;
+ fm_pcd_manip_params->h_NextManip = p_tunnel->h_hm_l2;
+ p_tunnel->h_hm_l3 = FM_PCD_ManipNodeSet(capwap_domain->h_fm_pcd,
+ fm_pcd_manip_params);
+ if (!p_tunnel->h_hm_l3) {
+ pr_err("FM_PCD_ManipNodeSet failed in add_out_tunnel: Hml3\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ memset(fm_pcd_manip_params, 0, sizeof(t_FmPcdManipParams));
+ fm_pcd_manip_params->type = e_FM_PCD_MANIP_HDR;
+ fm_pcd_manip_params->u.hdr.dontParseAfterManip = TRUE;
+ fm_pcd_manip_params->u.hdr.insrt = TRUE;
+ fm_pcd_manip_params->u.hdr.insrtParams.type =
+ e_FM_PCD_MANIP_INSRT_BY_HDR;
+ fm_pcd_manip_params->u.hdr.insrtParams.u.byHdr.type =
+ out_tunnel_params->udp_or_lite ?
+ e_FM_PCD_MANIP_INSRT_BY_HDR_UDP_LITE :
+ e_FM_PCD_MANIP_INSRT_BY_HDR_UDP;
+ fm_pcd_manip_params->u.hdr.insrtParams.u.byHdr.u.insrt.p_Data =
+ out_tunnel_params->p_udp_header;
+ fm_pcd_manip_params->u.hdr.insrtParams.u.byHdr.u.insrt.size =
+ UDP_HDR_SIZE;
+ fm_pcd_manip_params->h_NextManip = p_tunnel->h_hm_l3;
+ p_tunnel->h_hm_l4 = FM_PCD_ManipNodeSet(capwap_domain->h_fm_pcd,
+ fm_pcd_manip_params);
+ if (!p_tunnel->h_hm_l4) {
+ pr_err("FM_PCD_ManipNodeSet failed in add_out_tunnel: Hml4\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ out_tunnel_params->p_NextEngineParams->h_Manip = p_tunnel->h_hm_l4;
+ err = FM_PCD_MatchTableModifyNextEngine(capwap_domain->h_flow_id_table,
+ (uint16_t)GET_UPPER_TUNNEL_ID(flow_index),
+ out_tunnel_params->p_NextEngineParams);
+ if (err != E_OK) {
+ pr_err("FM_PCD_MatchTableModifyNextEngine failed in add_out_tunnel\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Configure of the DTLS encryption parameters */
+ if (out_tunnel_params->dtls) {
+ preheader_initdesc =
+ kzalloc(sizeof(struct dtls_encap_descriptor_t),
+ GFP_KERNEL);
+ if (preheader_initdesc == NULL) {
+ pr_err("error: %s: No More Buffers left for Descriptor\n",
+ __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ desc_len = (sizeof(struct dtls_encap_descriptor_t) -
+ sizeof(struct preheader_t)) / sizeof(uint32_t);
+
+ buff_start = (unsigned char *)preheader_initdesc +
+ sizeof(struct preheader_t);
+
+ pdb = &preheader_initdesc->pdb;
+ if (out_tunnel_params->dtls_params.wbIv)
+ pdb->options |= DTLS_ENCAP_OPTION_W_B;
+ pdb->epoch = out_tunnel_params->dtls_params.epoch;
+ pdb->type = out_tunnel_params->dtls_params.type;
+ memcpy(pdb->version,
+ &out_tunnel_params->dtls_params.version, 2);
+ memcpy(pdb->seq_num,
+ &out_tunnel_params->dtls_params.seq_num, 6);
+ memcpy(pdb->iv, out_tunnel_params->dtls_params.p_Iv, 16);
+
+ auth = &p_tunnel->auth_data;
+ cipher = &p_tunnel->cipher_data;
+
+ if ((out_tunnel_params->dtls_params.cipher_key_len / 8) >
+ DTLS_KEY_MAX_SIZE) {
+ pr_err("key size exceeded %d bytes", DTLS_KEY_MAX_SIZE);
+ kfree(preheader_initdesc);
+ err = -EINVAL;
+ goto out;
+ }
+
+ auth->auth_key_len =
+ out_tunnel_params->dtls_params.auth_key_len / 8;
+ memcpy(auth->auth_key, out_tunnel_params->dtls_params.auth_key,
+ auth->auth_key_len);
+ cipher->cipher_key_len =
+ out_tunnel_params->dtls_params.cipher_key_len / 8;
+ memcpy(cipher->cipher_key,
+ out_tunnel_params->dtls_params.cipher_key,
+ cipher->cipher_key_len);
+ auth->auth_type =
+ capwap_algs[out_tunnel_params->dtls_params.alg_type].
+ auth_alg;
+ cipher->cipher_type =
+ capwap_algs[out_tunnel_params->dtls_params.alg_type].
+ cipher_alg;
+
+ err = generate_split_key(auth, &capwap_domain->secinfo);
+ if (err) {
+ pr_err("error: %s: generate splitkey error\n",
+ __func__);
+ kfree(preheader_initdesc);
+ goto out;
+ }
+
+ cnstr_shdsc_dtls_encap((uint32_t *) buff_start, &desc_len,
+ cipher, auth, (uint32_t)4);
+
+ preheader_initdesc->prehdr.lo.field.pool_id =
+ capwap_domain->bpid;
+ /* 64-byte offset in the output FD */
+ preheader_initdesc->prehdr.lo.field.offset = 2;
+ preheader_initdesc->prehdr.hi.field.idlen = desc_len;
+
+ p_tunnel->sec_desc = (t_Handle)preheader_initdesc;
+
+ dma_addr = dma_map_single(capwap_domain->secinfo.jrdev,
+ p_tunnel->sec_desc,
+ sizeof(struct preheader_t) + desc_len * 4,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(capwap_domain->secinfo.jrdev, dma_addr)) {
+ pr_err("Unable to DMA map the dtls decap-descriptor address\n");
+ kfree(preheader_initdesc);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Init FQ from OP port to SEC */
+ fq = (struct qman_fq *)
+ capwap_domain->fqs->outbound_op_tx_fqs.fq_base;
+ fq[flow_index].fqid =
+ capwap_domain->fqs->outbound_op_tx_fqs.fqid_base +
+ flow_index;
+ channel = qm_channel_caam;
+ context_a = (u64)dma_addr;
+ context_b =
+ capwap_domain->fqs->outbound_sec_to_op_fqs.fqid_base +
+ flow_index;
+ err = capwap_fq_tx_init(&fq[flow_index], channel, context_a,
+ context_b);
+ if (err)
+ goto error_dma;
+
+ fq_node = kzalloc(sizeof(struct qman_fq_chain), GFP_KERNEL);
+ if (fq_node == NULL) {
+ err = -ENOMEM;
+ goto error_dma;
+ }
+ fq_node->fq = &fq[flow_index];
+ list_add_tail(&fq_node->list, &p_tunnel->fq_chain_head);
+ }
+
+ /* Pre SEC Section
+ * 1. copy ToS
+ * 2. insert CAPWAP
+ * 3. CAPWAP-manip
+ * 4. fragmentation
+ */
+ fm_pcd_cc_next_engine_params =
+ kzalloc(sizeof(t_FmPcdCcNextEngineParams), GFP_KERNEL);
+ if (fm_pcd_cc_next_engine_params == NULL) {
+ err = -ENOMEM;
+ goto error_dma;
+ }
+
+ fqid = capwap_domain->fqs->outbound_op_tx_fqs.fqid_base + flow_index;
+ memset(fm_pcd_cc_next_engine_params, 0,
+ sizeof(t_FmPcdCcNextEngineParams));
+ fm_pcd_cc_next_engine_params->nextEngine = e_FM_PCD_DONE;
+ fm_pcd_cc_next_engine_params->params.enqueueParams.action =
+ e_FM_PCD_ENQ_FRAME;
+ fm_pcd_cc_next_engine_params->params.enqueueParams.overrideFqid = TRUE;
+ fm_pcd_cc_next_engine_params->params.enqueueParams.newFqid = fqid;
+
+ if (out_tunnel_params->size_for_fragment) {
+ memset(fm_pcd_manip_params, 0, sizeof(t_FmPcdManipParams));
+ fm_pcd_manip_params->type = e_FM_PCD_MANIP_FRAG;
+ fm_pcd_manip_params->u.frag.hdr = HEADER_TYPE_CAPWAP;
+ fm_pcd_manip_params->u.frag.u.capwapFrag.sizeForFragmentation =
+ out_tunnel_params->size_for_fragment;
+ p_tunnel->h_capwap_frag =
+ FM_PCD_ManipNodeSet(capwap_domain->h_fm_pcd,
+ fm_pcd_manip_params);
+ if (!p_tunnel->h_capwap_frag) {
+ pr_err("FM_PCD_ManipNodeSet failed\n");
+ err = -EINVAL;
+ goto error_dma;
+ }
+ }
+
+ memset(fm_pcd_manip_params, 0, sizeof(t_FmPcdManipParams));
+ fm_pcd_manip_params->type = e_FM_PCD_MANIP_SPECIAL_OFFLOAD;
+ fm_pcd_manip_params->u.specialOffload.type =
+ e_FM_PCD_MANIP_SPECIAL_OFFLOAD_CAPWAP;
+ fm_pcd_manip_params->u.specialOffload.u.capwap.qosSrc =
+ e_FM_PCD_MANIP_HDR_QOS_SRC_NONE;
+ fm_pcd_manip_params->u.specialOffload.u.capwap.dtls =
+ out_tunnel_params->dtls;
+ if (p_tunnel->h_capwap_frag)
+ fm_pcd_manip_params->h_NextManip = p_tunnel->h_capwap_frag;
+ p_tunnel->h_capwap_manip = FM_PCD_ManipNodeSet(capwap_domain->h_fm_pcd,
+ fm_pcd_manip_params);
+ if (!p_tunnel->h_capwap_manip) {
+ pr_err("FM_PCD_ManipNodeSet failed\n");
+ err = -EINVAL;
+ goto error_dma;
+ }
+ fm_pcd_cc_next_engine_params->h_Manip = p_tunnel->h_capwap_manip;
+
+ if (out_tunnel_params->p_capwap_header) {
+ /* Need to create a CC table with a miss entry to work around the
+ * illegal HM->MANIP connection
+ */
+ cc_node_param =
+ kzalloc(sizeof(t_FmPcdCcNodeParams), GFP_KERNEL);
+ if (cc_node_param == NULL) {
+ err = -ENOMEM;
+ goto error_dma;
+ }
+
+ out_op_port = &capwap_domain->out_op_port;
+ cc_node_param->extractCcParams.type = e_FM_PCD_EXTRACT_NON_HDR;
+ cc_node_param->extractCcParams.extractNonHdr.src =
+ e_FM_PCD_EXTRACT_FROM_FRAME_START;
+ cc_node_param->extractCcParams.extractNonHdr.action =
+ e_FM_PCD_ACTION_EXACT_MATCH;
+ cc_node_param->extractCcParams.extractNonHdr.offset = 0;
+ cc_node_param->extractCcParams.extractNonHdr.size = 1;
+ cc_node_param->keysParams.numOfKeys = 0;
+ cc_node_param->keysParams.keySize = 1;
+ cc_node_param->keysParams.maxNumOfKeys = 0;
+
+ memcpy(&cc_node_param->keysParams.ccNextEngineParamsForMiss,
+ fm_pcd_cc_next_engine_params,
+ sizeof(t_FmPcdCcNextEngineParams));
+
+ out_op_port->fmPcdInfo.h_CcNodes[out_op_port->
+ fmPcdInfo.numOfCcNodes] =
+ FM_PCD_MatchTableSet(capwap_domain->h_fm_pcd,
+ cc_node_param);
+ if (!out_op_port->fmPcdInfo.h_CcNodes[out_op_port->
+ fmPcdInfo.numOfCcNodes]) {
+ pr_err("FM_PCD_MatchTableSet failed\n");
+ err = -EINVAL;
+ goto error_dma;
+ }
+ p_tunnel->h_ccNode =
+ out_op_port->fmPcdInfo.h_CcNodes[out_op_port->
+ fmPcdInfo.numOfCcNodes];
+ out_op_port->fmPcdInfo.h_CcNodesOrder[out_op_port->
+ fmPcdInfo.numOfCcNodes] =
+ out_op_port->fmPcdInfo.h_CcNodes[out_op_port->
+ fmPcdInfo.numOfCcNodes];
+ out_op_port->fmPcdInfo.numOfCcNodes++;
+
+ memset(fm_pcd_manip_params, 0, sizeof(t_FmPcdManipParams));
+ fm_pcd_manip_params->type = e_FM_PCD_MANIP_HDR;
+ fm_pcd_manip_params->u.hdr.dontParseAfterManip = TRUE;
+ fm_pcd_manip_params->u.hdr.insrt = TRUE;
+ fm_pcd_manip_params->u.hdr.insrtParams.type =
+ e_FM_PCD_MANIP_INSRT_BY_HDR;
+ fm_pcd_manip_params->u.hdr.insrtParams.u.byHdr.type =
+ e_FM_PCD_MANIP_INSRT_BY_HDR_CAPWAP;
+ fm_pcd_manip_params->u.hdr.insrtParams.u.byHdr.u.insrt.p_Data =
+ out_tunnel_params->p_capwap_header;
+ fm_pcd_manip_params->u.hdr.insrtParams.u.byHdr.u.insrt.size =
+ out_tunnel_params->capwap_header_size;
+ p_tunnel->h_hm_capwap =
+ FM_PCD_ManipNodeSet(capwap_domain->h_fm_pcd,
+ fm_pcd_manip_params);
+ if (!p_tunnel->h_hm_capwap) {
+ pr_err("FM_PCD_ManipNodeSet failed\n");
+ err = -EINVAL;
+ goto error_dma;
+ }
+
+ memset(fm_pcd_cc_next_engine_params, 0,
+ sizeof(t_FmPcdCcNextEngineParams));
+ fm_pcd_cc_next_engine_params->nextEngine = e_FM_PCD_CC;
+ fm_pcd_cc_next_engine_params->params.ccParams.h_CcNode =
+ out_op_port->fmPcdInfo.h_CcNodes[out_op_port->
+ fmPcdInfo.numOfCcNodes-1];
+ fm_pcd_cc_next_engine_params->h_Manip = p_tunnel->h_hm_capwap;
+ }
+
+ err = FM_PCD_MatchTableModifyNextEngine(capwap_domain->h_flow_id_table,
+ (uint16_t)flow_index,
+ fm_pcd_cc_next_engine_params);
+ if (err != E_OK)
+ goto error_dma;
+
+ p_tunnel->key_index = flow_index;
+ err = 0;
+ goto out;
+
+error_dma:
+ if (out_tunnel_params->dtls) {
+ qman_destroy_fq(&fq[flow_index], 0);
+ dma_unmap_single(capwap_domain->secinfo.jrdev,
+ dma_addr,
+ sizeof(struct preheader_t) + desc_len * 4,
+ DMA_TO_DEVICE);
+ kfree(preheader_initdesc);
+ }
+out:
+ kfree(fm_pcd_manip_params);
+ kfree(fm_pcd_cc_next_engine_params);
+ kfree(cc_node_param);
+ return err;
+}
+
+static int remove_out_tunnel(struct dpaa_capwap_tunnel *p_tunnel)
+{
+ struct dpaa_capwap_domain *capwap_domain;
+ int err;
+ t_FmPcdCcNextEngineParams cc_next_engine_params;
+ struct t_Port *out_op_port;
+ struct qman_fq_chain *fq_node, *tmp;
+
+ capwap_domain = p_tunnel->dpaa_capwap_domain;
+ memset(&cc_next_engine_params, 0, sizeof(t_FmPcdCcNextEngineParams));
+ cc_next_engine_params.nextEngine = e_FM_PCD_DONE;
+ cc_next_engine_params.params.enqueueParams.action = e_FM_PCD_DROP_FRAME;
+ cc_next_engine_params.statisticsEn = TRUE;
+
+ err = FM_PCD_MatchTableModifyNextEngine(capwap_domain->h_flow_id_table,
+ (uint16_t)p_tunnel->key_index,
+ &cc_next_engine_params);
+ if (err != E_OK)
+ return err;
+ err = FM_PCD_MatchTableModifyNextEngine(capwap_domain->h_flow_id_table,
+ (uint16_t)GET_UPPER_TUNNEL_ID(p_tunnel->key_index),
+ &cc_next_engine_params);
+ if (err != E_OK)
+ return err;
+
+ if (p_tunnel->h_hm_l4) {
+ FM_PCD_ManipNodeDelete(p_tunnel->h_hm_l4);
+ p_tunnel->h_hm_l4 = NULL;
+ }
+ if (p_tunnel->h_hm_l3) {
+ FM_PCD_ManipNodeDelete(p_tunnel->h_hm_l3);
+ p_tunnel->h_hm_l3 = NULL;
+ }
+ if (p_tunnel->h_hm_l2) {
+ FM_PCD_ManipNodeDelete(p_tunnel->h_hm_l2);
+ p_tunnel->h_hm_l2 = NULL;
+ }
+ if (p_tunnel->h_ccNode) {
+ out_op_port = &capwap_domain->out_op_port;
+ err = FM_PCD_MatchTableDelete(p_tunnel->h_ccNode);
+ if (err != E_OK)
+ return err;
+ out_op_port->fmPcdInfo.numOfCcNodes--;
+ }
+ if (p_tunnel->h_capwap_manip) {
+ FM_PCD_ManipNodeDelete(p_tunnel->h_capwap_manip);
+ p_tunnel->h_capwap_manip = NULL;
+ }
+ if (p_tunnel->h_hm_capwap) {
+ FM_PCD_ManipNodeDelete(p_tunnel->h_hm_capwap);
+ p_tunnel->h_hm_capwap = NULL;
+ }
+
+ list_for_each_entry_safe(fq_node, tmp, &p_tunnel->fq_chain_head, list) {
+ teardown_fq(fq_node->fq);
+ list_del(&fq_node->list);
+ }
+
+ return 0;
+}
+
+int dpaa_capwap_domain_remove_tunnel(struct dpaa_capwap_tunnel *p_tunnel)
+{
+ struct dpaa_capwap_domain *capwap_domain;
+ int err = 0;
+
+ capwap_domain =
+ (struct dpaa_capwap_domain *)p_tunnel->dpaa_capwap_domain;
+ if (p_tunnel->tunnel_dir == e_DPAA_CAPWAP_DOMAIN_DIR_INBOUND)
+ err = remove_in_tunnel(p_tunnel);
+ else
+ err = remove_out_tunnel(p_tunnel);
+
+ if (err != E_OK)
+ return err;
+ if (p_tunnel->sec_desc)
+ p_tunnel->sec_desc = NULL;
+
+ if (p_tunnel->tunnel_dir == e_DPAA_CAPWAP_DOMAIN_DIR_INBOUND)
+ enqueue_tunnel_obj(&capwap_domain->in_tunnel_list,
+ p_tunnel);
+ else
+ enqueue_tunnel_obj(&capwap_domain->out_tunnel_list,
+ p_tunnel);
+
+ return E_OK;
+}
diff --git a/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_domain.h b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_domain.h
new file mode 100644
index 0000000..8fd8122
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_domain.h
@@ -0,0 +1,179 @@
+/* Copyright (c) 2014 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_CAPWAP_DOMAIN_H
+#define __DPAA_CAPWAP_DOMAIN_H
+
+#include "fm_port_ext.h"
+#include "fm_pcd_ext.h"
+#include "dpaa_capwap_domain_ext.h"
+#include "dpaa_capwap_desc.h"
+
+#define OUTER_HEADER_MAX_SIZE 100
+#define DTLS_KEY_MAX_SIZE 256
+#define TABLE_KEY_MAX_SIZE FM_PCD_MAX_SIZE_OF_KEY
+
+#define UDP_HDR_SIZE 8
+
+enum e_PortType {
+ e_CAPWAP_DOM_PORT_RXTX = 0,
+ e_CAPWAP_DOM_PORT_SEC_DEC,
+ e_CAPWAP_DOM_PORT_SEC_ENC,
+ e_CAPWAP_DOM_PORT_OP_POST_DEC,
+ e_CAPWAP_DOM_PORT_OP_OUT
+};
+
+struct qman_fq_chain {
+ struct qman_fq *fq;
+ struct list_head list;
+};
+
+struct dpaa_capwap_tunnel {
+ enum dpaa_capwap_domain_direction tunnel_dir;
+ uint32_t tunnel_id;
+ uint16_t key_index;
+ t_Handle dpaa_capwap_domain;
+ t_Handle sec_desc;
+
+ struct cipher_params cipher_data;
+ struct auth_params auth_data;
+
+ /* Tx internal info */
+ t_Handle h_hm_capwap;
+ t_Handle h_ccNode;
+ t_Handle h_hm_l2;
+ t_Handle h_hm_l3;
+ t_Handle h_hm_l4;
+ t_Handle h_capwap_frag;
+ t_Handle h_capwap_manip;
+
+ /* Rx Pre SEC internal info */
+ uint8_t *p_key;
+ uint8_t *p_mask;
+ t_Handle h_hm_till_manip;
+
+ struct list_head node;
+ struct list_head fq_chain_head;
+};
+
+struct t_FmPcdInfo {
+ t_Handle h_NetEnv;
+ t_Handle h_CcTree;
+ uint8_t numOfCcNodes;
+ t_Handle h_CcNodes[5];
+ t_Handle h_CcNodesOrder[5];
+};
+
+struct t_Port {
+ enum e_PortType type;
+ t_Handle h_DpaPort;
+ t_Handle h_Domain;
+ uint32_t rxPcdQsBase;
+ uint32_t numOfTxQs;
+ struct t_FmPcdInfo fmPcdInfo;
+ uint8_t fm_id;
+ uint8_t port_id;
+ uint16_t tx_ch;
+};
+
+struct dpaa_capwap_domain {
+ struct t_Port rx_tx_port;
+ struct t_Port post_dec_op_port;
+ struct t_Port out_op_port;
+
+ t_Handle h_fm_pcd;
+
+ uint32_t max_num_of_tunnels;
+ bool support_ipv6;
+
+ /* Tx internal info */
+ t_Handle h_op_port;
+ t_Handle h_flow_id_table;
+
+ /* Rx Pre SEC internal info */
+ uint8_t key_size;
+ uint32_t key_fields;
+ uint32_t mask_fields;
+ t_Handle h_em_table;
+
+ struct list_head in_tunnel_list;
+ struct list_head out_tunnel_list;
+ struct dpaa_capwap_sec_info secinfo;
+ struct dpaa_capwap_domain_fqs *fqs;
+ struct net_device *net_dev; /* Device for CAPWAP Ethernet */
+ uint8_t bpid;
+};
+
+static inline struct dpaa_capwap_tunnel *dequeue_tunnel_obj(
+ struct list_head *p_list)
+{
+ struct dpaa_capwap_tunnel *p_tunnel = NULL;
+ struct list_head *p_next;
+
+ if (!list_empty(p_list)) {
+ p_next = p_list->next;
+ p_tunnel = list_entry(p_next, struct dpaa_capwap_tunnel, node);
+ list_del(p_next);
+ }
+
+ return p_tunnel;
+}
+
+static inline void enqueue_tunnel_obj(struct list_head *p_List,
+ struct dpaa_capwap_tunnel *p_Tunnel)
+{
+ list_add_tail(&p_Tunnel->node, p_List);
+}
+
+int add_in_tunnel(struct dpaa_capwap_domain *capwap_domain,
+ struct dpaa_capwap_tunnel *p_tunnel,
+ struct dpaa_capwap_domain_tunnel_in_params *in_tunnel_params);
+
+int add_out_tunnel(struct dpaa_capwap_domain *capwap_domain,
+ struct dpaa_capwap_tunnel *p_tunnel,
+ struct dpaa_capwap_domain_tunnel_out_params *out_tunnel_params);
+
+struct dpaa_capwap_domain_fqs *get_domain_fqs(void);
+
+int op_init(struct t_Port *port, struct net_device *net_dev);
+int capwap_fq_pre_init(struct dpaa_capwap_domain *capwap_domain);
+int capwap_tunnel_drv_init(struct dpaa_capwap_domain *domain);
+uint8_t get_capwap_bpid(struct net_device *net_dev);
+int capwap_br_init(struct dpaa_capwap_domain *domain);
+uint16_t get_flow_index(bool is_dtls, bool is_control_tunnel);
+int capwap_kernel_rx_ctl(struct capwap_domain_kernel_rx_ctl *rx_ctl);
+struct dpaa_capwap_domain *dpaa_capwap_domain_config(
+ struct dpaa_capwap_domain_params *new_capwap_domain);
+int dpaa_capwap_domain_init(struct dpaa_capwap_domain *capwap_domain);
+int dpaa_capwap_domain_remove_tunnel(struct dpaa_capwap_tunnel *p_tunnel);
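+
+/* Minimal usage sketch (illustrative only; the handle and parameter
+ * values below are assumptions, not taken from a real configuration,
+ * and a real setup must also fill inbound_op/outbound_op):
+ *
+ *   struct dpaa_capwap_domain_params params = { 0 };
+ *   struct dpaa_capwap_domain *domain;
+ *
+ *   params.h_fm_pcd = h_fm_pcd;   // FM-PCD handle obtained elsewhere
+ *   params.max_num_of_tunnels = 4;
+ *   params.support_ipv6 = false;
+ *   params.inbound_pre_params.h_Table = h_em_table;
+ *   params.inbound_pre_params.key_fields =
+ *           DPAA_CAPWAP_DOMAIN_KEY_FIELD_SIP |
+ *           DPAA_CAPWAP_DOMAIN_KEY_FIELD_DPORT |
+ *           DPAA_CAPWAP_DOMAIN_KEY_FIELD_PREAMBLE;
+ *
+ *   domain = dpaa_capwap_domain_config(&params);
+ *   if (!domain || dpaa_capwap_domain_init(domain))
+ *           // handle configuration/initialization failure
+ */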
+
+#endif /* __DPAA_CAPWAP_DOMAIN_H */
diff --git a/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_domain_ext.h b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_domain_ext.h
new file mode 100644
index 0000000..b67d1f9
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_domain_ext.h
@@ -0,0 +1,228 @@
+/* Copyright (c) 2014 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_CAPWAP_DOMAIN_EXT_H
+#define __DPAA_CAPWAP_DOMAIN_EXT_H
+
+#include "error_ext.h"
+#include "std_ext.h"
+
+#include "fm_pcd_ext.h"
+
+/* Use source-address in key */
+#define DPAA_CAPWAP_DOMAIN_KEY_FIELD_SIP 0x00000001
+/* Use destination-address in key */
+#define DPAA_CAPWAP_DOMAIN_KEY_FIELD_DIP 0x00000002
+/* Use protocol field in key */
+#define DPAA_CAPWAP_DOMAIN_KEY_FIELD_PROTO 0x00000004
+/* Use UDP source-port in key */
+#define DPAA_CAPWAP_DOMAIN_KEY_FIELD_SPORT 0x00000008
+/* Use UDP destination-port in key */
+#define DPAA_CAPWAP_DOMAIN_KEY_FIELD_DPORT 0x00000010
+/* Use CAPWAP-Preamble in key (first BYTE);
+ * NOTE: This field MUST be in the key in order
+ * to distinguish between DTLS and non-DTLS tunnels
+ */
+#define DPAA_CAPWAP_DOMAIN_KEY_FIELD_PREAMBLE 0x00000020
+/* Use DTLS type in key (first BYTE) */
+#define DPAA_CAPWAP_DOMAIN_KEY_FIELD_DTLS_TYPE 0x00000040
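+
+/* Illustrative note (not part of the API contract): a typical IPv4 key
+ * that matches on the full 5-tuple plus the CAPWAP preamble and DTLS
+ * type would be composed as:
+ *
+ *   key_fields = DPAA_CAPWAP_DOMAIN_KEY_FIELD_SIP |
+ *                DPAA_CAPWAP_DOMAIN_KEY_FIELD_DIP |
+ *                DPAA_CAPWAP_DOMAIN_KEY_FIELD_PROTO |
+ *                DPAA_CAPWAP_DOMAIN_KEY_FIELD_SPORT |
+ *                DPAA_CAPWAP_DOMAIN_KEY_FIELD_DPORT |
+ *                DPAA_CAPWAP_DOMAIN_KEY_FIELD_PREAMBLE |
+ *                DPAA_CAPWAP_DOMAIN_KEY_FIELD_DTLS_TYPE;
+ *
+ * Assuming the usual FMD field sizes (4-byte IPv4 address, 1-byte
+ * protocol, 2-byte UDP port), this yields a 4+4+1+2+2+1+1 = 15 byte
+ * key (see calc_key_size()), which must not exceed TABLE_KEY_MAX_SIZE.
+ */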
+
+#define DPAA_CAPWAP_DOMAIN_MAX_NUM_OF_TUNNELS (FM_PCD_MAX_NUM_OF_FLOWS/2)
+
+ /* A structure for inbound-pre parameters */
+struct dpaa_capwap_domain_inbound_pre_params {
+ /* Flags indicating key components;
+ * (use DPAA_CAPWAP_DOMAIN_KEY_FIELD_xxx macros to configure)
+ */
+ uint32_t key_fields;
+ /* Flags indicating mask components;
+ * (use DPAA_CAPWAP_DOMAIN_KEY_FIELD_xxx macros to configure)
+ */
+ uint32_t mask_fields;
+ /* Handle to a table */
+ t_Handle h_Table;
+};
+
+struct capwap_op_port {
+ uint8_t fm_id;
+ uint8_t port_id;
+ t_Handle port_handle;
+};
+
+/* A structure for defining DPAA-CAPWAP-Domain initialization parameters */
+struct dpaa_capwap_domain_params {
+ struct dpaa_capwap_domain_inbound_pre_params inbound_pre_params;
+ void *h_fm_pcd; /* A handle to the FM-PCD module */
+ bool support_ipv6; /* TODO */
+ uint32_t max_num_of_tunnels; /* Maximum number of active tunnels */
+
+ struct capwap_op_port outbound_op;
+ struct capwap_op_port inbound_op;
+
+ void *id; /* Output parameter: pointer to the CAPWAP domain */
+};
+
+ /* DPAA CAPWAP Domain Direction */
+enum dpaa_capwap_domain_direction {
+ e_DPAA_CAPWAP_DOMAIN_DIR_INVALID = 0, /* Invalid direction */
+ e_DPAA_CAPWAP_DOMAIN_DIR_INBOUND, /* Inbound direction */
+ e_DPAA_CAPWAP_DOMAIN_DIR_OUTBOUND /* Outbound direction */
+};
+
+ /* A structure for defining IP address */
+struct dpaa_capwap_domain_ip_address {
+ bool ipv6; /* TRUE for ipv6 format */
+
+ union {
+ /* IPv4 address format */
+ uint32_t ipv4_addr;
+ /* IPv6 address format */
+ uint8_t ipv6_addr[NET_HEADER_FIELD_IPv6_ADDR_SIZE];
+ } u;
+};
+
+/* A structure for defining IP mask */
+struct dpaa_capwap_domain_ip_mask {
+ bool ipv6; /* TRUE for ipv6 format */
+
+ union {
+ uint32_t ipv4_mask; /* IPv4 mask format */
+ uint8_t ipv6_mask[NET_HEADER_FIELD_IPv6_ADDR_SIZE];
+ /* IPv6 mask format */
+ } u;
+};
+
+/* DTLS Anti-Replay-Size Options */
+enum dtls_ars {
+ e_DTLS_ARS_0, /* No anti-replay window */
+ e_DTLS_ARS_32, /* 32-entry anti-replay window */
+ e_DTLS_ARS_64 /* 64-entry anti-replay window */
+};
+
+struct dtls_sec_params {
+ /* IV writeback (block cipher only):
+ *FALSE: IV field in PDB held constant
+ *TRUE: IV field in PDB written back with last block of ciphertext
+ */
+ bool wbIv;
+ uint8_t type; /* Record type */
+ enum dtls_ars arw; /* Anti replay window */
+ uint16_t version; /* Record version */
+ uint16_t epoch; /* Record epoch */
+ uint64_t seq_num; /* Initial sequence number */
+ /* Initialization vector (16 bytes);
+ * Null pointer for using the internal random number generator
+ */
+ uint8_t p_Iv[16];
+ uint32_t alg_type;
+ uint8_t *cipher_key;
+ uint32_t cipher_key_len;
+ uint8_t *auth_key;
+ uint32_t auth_key_len;
+};
+
+ /* A structure for defining SA-Out parameters */
+struct dpaa_capwap_domain_tunnel_out_params {
+ uint32_t eth_header_size; /* size of ETH header */
+ uint8_t *p_ether_header; /* ETH encapsulation header */
+ uint32_t ip_header_size; /* size of IP header */
+ uint8_t *p_ip_header; /* IP encapsulation header */
+ /* offset of the last protocol-id field */
+ uint32_t last_pid_offset;
+ uint32_t initial_id; /* initial ID value; will be incremented
+ * for every frame
+ */
+ uint8_t *p_udp_header; /* UDP encapsulation header */
+ bool udp_or_lite; /* UDP or UDP-Lite header */
+ uint32_t capwap_header_size; /* size of CAPWAP header */
+ uint8_t *p_capwap_header; /* CAPWAP encapsulation header */
+ struct t_FmPcdCcNextEngineParams *p_NextEngineParams; /* TODO */
+ uint16_t size_for_fragment; /* If not zero, fragmentation is
+ * required and will be built by
+ * the driver
+ */
+ bool dtls; /* DTLS tunnel */
+ bool is_control; /* true: control tunnel, false: data tunnel */
+
+ struct dtls_sec_params dtls_params;
+
+ void *capwap_domain_id; /* Pointer to the CAPWAP domain */
+ void *tunnel_id; /* Pointer to the CAPWAP tunnel */
+};
+
+/* A structure for defining tunnel-in parameters */
+struct dpaa_capwap_domain_tunnel_in_params {
+ struct dpaa_capwap_domain_ip_address sip; /* Source IP address */
+ struct dpaa_capwap_domain_ip_mask sip_mask; /* Source IP mask */
+ struct dpaa_capwap_domain_ip_address dip; /* Destination IP address */
+ struct dpaa_capwap_domain_ip_mask dip_mask; /* Destination IP mask */
+ uint16_t src_port; /* Source UDP port */
+ uint16_t dst_port; /* Destination UDP port */
+
+ bool dtls; /* DTLS tunnel */
+ bool is_control; /* true: control tunnel,
+ * false: data tunnel
+ */
+
+ struct dtls_sec_params dtls_params;
+
+ void *capwap_domain_id; /* Pointer to the CAPWAP domain */
+ void *tunnel_id; /* Pointer to the CAPWAP tunnel */
+};
+
+struct fqid_range {
+ u32 fqid_base;
+ u32 fq_count;
+ void *fq_base; /* The base pointer for dpa_fq or qman_fq */
+};
+
+struct dpaa_capwap_domain_fqs {
+ /* Inbound FQs */
+ struct fqid_range inbound_eth_rx_fqs;
+ struct fqid_range inbound_sec_to_op_fqs;
+ struct fqid_range inbound_core_rx_fqs;
+ /* Outbound FQs */
+ struct fqid_range outbound_core_tx_fqs;
+ struct fqid_range outbound_op_tx_fqs;
+ struct fqid_range outbound_sec_to_op_fqs;
+ u32 debug_fqid;
+};
+
+struct capwap_domain_kernel_rx_ctl {
+ bool on; /* true: turn on, false: turn off */
+ bool is_control; /* true: control tunnel, false: data tunnel */
+ bool is_dtls; /* true: DTLS tunnel, false: non-DTLS tunnel */
+ u32 fqid; /* FQID returned by the kernel */
+ void *capwap_domain_id; /* Pointer to the CAPWAP domain */
+};
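+
+/* Illustrative example (hypothetical values; capwap_kernel_rx_ctl() is
+ * declared in dpaa_capwap_domain.h): to claim the non-DTLS data tunnel
+ * for in-kernel reception and learn its FQID, a caller might do:
+ *
+ *   struct capwap_domain_kernel_rx_ctl ctl = {
+ *           .on = true,
+ *           .is_control = false,
+ *           .is_dtls = false,
+ *           .capwap_domain_id = domain,
+ *   };
+ *   if (!capwap_kernel_rx_ctl(&ctl))
+ *           pr_info("kernel rx fqid: 0x%x\n", ctl.fqid);
+ */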
+
+#endif /* __DPAA_CAPWAP_DOMAIN_EXT_H */
diff --git a/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_fq.c b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_fq.c
new file mode 100644
index 0000000..996c6afb
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_fq.c
@@ -0,0 +1,626 @@
+/* Copyright 2014 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <compat.h>
+#include <linux/fsl_qman.h>
+
+#include "dpaa_capwap_domain.h"
+#include "dpaa_eth_common.h"
+#include "dpaa_capwap.h"
+#include "mac.h"
+
+/* flows: 0--capwap dtls control tunnel
+ * 1--capwap dtls data tunnel
+ * 2--capwap non-dtls control tunnel
+ * 3--capwap non-dtls data tunnel
+ */
+#define CAPWAP_FLOW_COUNT 4
+
+static struct dpaa_capwap_domain_fqs *fqs;
+
+static qman_cb_dqrr rx_cbs[] = { capwap_control_dtls_rx_dqrr,
+ capwap_data_dtls_rx_dqrr,
+ capwap_control_n_dtls_rx_dqrr,
+ capwap_data_n_dtls_rx_dqrr };
+
+static int fill_fq_range(struct fqid_range *fqid_r, u32 count)
+{
+ u32 fqid_base;
+ int ret;
+
+ ret = qman_alloc_fqid_range(&fqid_base, count, 0, 0);
+ if (ret != count) {
+ pr_err("Can't alloc enough fqid for capwap\n");
+ return -ENODEV;
+ }
+
+ fqid_r->fq_count = count;
+ fqid_r->fqid_base = fqid_base;
+
+ return 0;
+}
+
+static int capwap_alloc_fqs(void)
+{
+ int ret;
+
+ fqs = kzalloc(sizeof(struct dpaa_capwap_domain_fqs), GFP_KERNEL);
+ if (!fqs)
+ return -ENOMEM;
+
+ /* Four CAPWAP Tunnel + Non-CAPWAP */
+ ret = fill_fq_range(&fqs->inbound_eth_rx_fqs, CAPWAP_FLOW_COUNT + 1);
+ if (ret)
+ goto alloc_failed;
+
+ /* Two DTLS Tunnel */
+ ret = fill_fq_range(&fqs->inbound_sec_to_op_fqs, CAPWAP_FLOW_COUNT / 2);
+ if (ret)
+ goto alloc_failed;
+
+ /* Four CAPWAP Tunnel */
+ ret = fill_fq_range(&fqs->inbound_core_rx_fqs, CAPWAP_FLOW_COUNT);
+ if (ret)
+ goto alloc_failed;
+
+ /* Four CAPWAP Tunnel */
+ ret = fill_fq_range(&fqs->outbound_core_tx_fqs, CAPWAP_FLOW_COUNT);
+ if (ret)
+ goto alloc_failed;
+
+ /* The lower four flows are for sending back to OP (NON-DTLS Tunnel),
+ * or sending to SEC for encryption and then also sent back to OP.
+ * The upper four flows are for sending to Tx port after header
+ * manipulation
+ */
+ ret = fill_fq_range(&fqs->outbound_op_tx_fqs, CAPWAP_FLOW_COUNT * 2);
+ if (ret)
+ goto alloc_failed;
+
+ /* Two DTLS Tunnel */
+ ret = fill_fq_range(&fqs->outbound_sec_to_op_fqs,
+ CAPWAP_FLOW_COUNT / 2);
+ if (ret)
+ goto alloc_failed;
+
+ ret = qman_alloc_fqid_range(&fqs->debug_fqid, 1, 0, 0);
+ if (ret != 1) {
+ pr_err("Can't alloc enough fqid for capwap\n");
+ ret = -ENODEV;
+ goto alloc_failed;
+ }
+ return 0;
+
+alloc_failed:
+ kfree(fqs);
+ fqs = NULL;
+ return ret;
+}
+
+struct dpaa_capwap_domain_fqs *get_domain_fqs(void)
+{
+ int ret;
+
+ if (!fqs) {
+ ret = capwap_alloc_fqs();
+ if (ret)
+ return NULL;
+ }
+ return fqs;
+}
+
+int capwap_fq_rx_init(struct qman_fq *fq, u32 fqid,
+ u16 channel, qman_cb_dqrr cb)
+{
+ struct qm_mcc_initfq opts;
+ int ret;
+
+ fq->cb.dqrr = cb;
+ ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
+ if (ret) {
+ pr_err("qman_create_fq() failed\n");
+ return ret;
+ }
+
+ opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTA;
+ opts.fqd.dest.channel = channel;
+ opts.fqd.dest.wq = 3;
+ /* FIXME: why would we want to keep an empty FQ in cache? */
+ opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+ opts.fqd.fq_ctrl |= QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
+ opts.fqd.context_a.stashing.exclusive =
+ QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
+ QM_STASHING_EXCL_ANNOTATION;
+ opts.fqd.context_a.stashing.data_cl = 2;
+ opts.fqd.context_a.stashing.annotation_cl = 1;
+ opts.fqd.context_a.stashing.context_cl =
+ DIV_ROUND_UP(sizeof(struct qman_fq), 64);
+
+ ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
+ if (ret < 0) {
+ pr_err("qman_init_fq(%u) = %d\n",
+ qman_fq_fqid(fq), ret);
+ qman_destroy_fq(fq, 0);
+ return ret;
+ }
+
+ return 0;
+}
+
+int capwap_fq_tx_init(struct qman_fq *fq, u16 channel,
+ u64 context_a, u32 context_b)
+{
+ struct qm_mcc_initfq opts;
+ int ret;
+ uint32_t flags = QMAN_FQ_FLAG_TO_DCPORTAL;
+
+ if (!fq->fqid)
+ flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
+ ret = qman_create_fq(fq->fqid, flags, fq);
+ if (ret) {
+ pr_err("qman_create_fq() failed\n");
+ return ret;
+ }
+
+ opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
+ if (context_a)
+ opts.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ if (context_b)
+ opts.we_mask |= QM_INITFQ_WE_CONTEXTB;
+ opts.fqd.dest.channel = channel;
+ opts.fqd.dest.wq = 3;
+
+ opts.fqd.context_b = context_b;
+ qm_fqd_context_a_set64(&opts.fqd, context_a);
+ ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
+ if (ret < 0) {
+ pr_err("qman_init_fq(%u) = %d\n",
+ qman_fq_fqid(fq), ret);
+ qman_destroy_fq(fq, 0);
+ return ret;
+ }
+
+ return 0;
+}
+
+void teardown_fq(struct qman_fq *fq)
+{
+ u32 flags;
+ int s = qman_retire_fq(fq, &flags);
+ if (s == 1) {
+ /* Retire is non-blocking, poll for completion */
+ enum qman_fq_state state;
+ do {
+ qman_poll();
+ qman_fq_state(fq, &state, &flags);
+ } while (state != qman_fq_state_retired);
+ if (flags & QMAN_FQ_STATE_NE) {
+ /* FQ isn't empty, drain it */
+ s = qman_volatile_dequeue(fq, 0,
+ QM_VDQCR_NUMFRAMES_TILLEMPTY);
+ BUG_ON(s);
+ /* Poll for completion */
+ do {
+ qman_poll();
+ qman_fq_state(fq, &state, &flags);
+ } while (flags & QMAN_FQ_STATE_VDQCR);
+ }
+ }
+ s = qman_oos_fq(fq);
+ BUG_ON(s);
+ qman_destroy_fq(fq, 0);
+}
+
+static void dump_hex(uint8_t *data, uint32_t count)
+{
+ uint32_t i;
+
+ for (i = 0; i < count; i++) {
+ if (!(i%16))
+ pr_info("\n%04x ", i);
+ else if (!(i%8))
+ pr_info(" ");
+ pr_info("%02x ", *data++);
+ }
+ pr_info("\n");
+}
+
+void dump_fd(const struct qm_fd *fd)
+{
+ u64 addr;
+ struct qm_sg_entry *sg_entry;
+ uint32_t len;
+ uint32_t final = 0;
+ uint8_t *data;
+
+ addr = qm_fd_addr_get64(fd);
+ pr_info("fd_status = 0x%08x\n", fd->status);
+ pr_info("fd_opaque= 0x%08x\n", fd->opaque);
+ pr_info("format is 0x%x\n", fd->format);
+ pr_info("bpid = %d\n", fd->bpid);
+ pr_info("addr=0x%llx, vaddr=0x%p\n", addr, phys_to_virt(fd->addr));
+
+ if (fd->format == qm_fd_sg) {/*short sg */
+ addr = qm_fd_addr(fd);
+ len = fd->length20;
+ pr_info("FD: addr = 0x%llx\n", addr);
+ pr_info(" offset=%d\n", fd->offset);
+ pr_info(" len = %d\n", len);
+ data = phys_to_virt(fd->addr);
+ data += fd->offset;
+ sg_entry = (struct qm_sg_entry *) data;
+ do {
+ addr = qm_sg_addr(sg_entry);
+ len = sg_entry->length;
+ final = sg_entry->final;
+ pr_info("SG ENTRY: addr = 0x%llx\n", addr);
+ pr_info(" len = %d\n", len);
+ pr_info(" bpid = %d\n", sg_entry->bpid);
+ pr_info(" extension = %d\n",
+ sg_entry->extension);
+ data = phys_to_virt(addr);
+ pr_info(" v-addr=%p\n", data);
+ data += sg_entry->offset;
+ dump_hex(data, len);
+ if (final)
+ break;
+ sg_entry++;
+ } while (1);
+ } else if (fd->format == qm_fd_contig) { /* short single */
+ addr = qm_fd_addr(fd);
+ len = fd->length20;
+ pr_info("FD: addr = 0x%llx\n", addr);
+ pr_info(" offset=%d\n", fd->offset);
+ pr_info(" len = %d\n", len);
+ data = phys_to_virt(addr);
+ pr_info(" v-addr=%p\n", data);
+ dump_hex(data, len + fd->offset);
+ }
+
+}
+
+static enum qman_cb_dqrr_result
+rx_def_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_bp *dpa_bp;
+ const struct qm_fd *fd = &dq->fd;
+
+ pr_info("rx default dqrr\n");
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ pr_info("op_rx_def_dqrr:fqid=0x%x, bpid = %d\n", fq->fqid, fd->bpid);
+ dpa_bp = dpa_bpid2pool(fd->bpid);
+ BUG_ON(!dpa_bp);
+
+
+ if (netif_msg_hw(priv) && net_ratelimit())
+ netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_TX_ERRORS);
+
+ dump_fd(fd);
+
+ dpa_fd_release(net_dev, fd);
+
+ return qman_cb_dqrr_consume;
+}
+
+/* Initialize the FQs that do not depend on per-tunnel parameters */
+int capwap_fq_pre_init(struct dpaa_capwap_domain *capwap_domain)
+{
+ int ret;
+ struct dpa_fq *d_fq;
+ struct qman_fq *q_fq;
+ uint32_t fqid;
+ int i, j;
+ struct dpa_priv_s *net_priv;
+ u64 context_a;
+ u32 context_b;
+ uint16_t channel;
+ u32 debug_fqid;
+
+ net_priv = netdev_priv(capwap_domain->net_dev);
+
+/* Debug FQ initialization */
+ debug_fqid = capwap_domain->fqs->debug_fqid;
+ d_fq = kzalloc(sizeof(struct dpa_fq), GFP_KERNEL);
+ if (d_fq == NULL)
+ return -ENOMEM;
+ d_fq->net_dev = capwap_domain->net_dev;
+ ret = capwap_fq_rx_init(&d_fq->fq_base, debug_fqid, net_priv->channel,
+ rx_def_dqrr);
+ if (ret) {
+ pr_err("init debug fq failed\n");
+ kfree(d_fq);
+ return ret;
+ }
+
+
+/* Inbound FQs pre-initialization */
+ /* Four FQs to Core */
+ d_fq = kzalloc(sizeof(struct dpa_fq) * CAPWAP_FLOW_COUNT, GFP_KERNEL);
+ if (d_fq == NULL)
+ return -ENOMEM;
+ capwap_domain->fqs->inbound_core_rx_fqs.fq_base = d_fq;
+ for (i = 0; i < CAPWAP_FLOW_COUNT; i++) {
+ d_fq[i].net_dev = capwap_domain->net_dev;
+
+ fqid = capwap_domain->fqs->inbound_core_rx_fqs.fqid_base + i;
+ ret = capwap_fq_rx_init(&d_fq[i].fq_base, fqid,
+ net_priv->channel, rx_cbs[i]);
+ if (ret) {
+ for (j = 0; j < i; j++)
+ qman_destroy_fq(&d_fq[j].fq_base, 0);
+ kfree(d_fq);
+ return ret;
+ }
+ }
+
+ /* Two Fqs from SEC to OP */
+ q_fq = kzalloc(sizeof(struct qman_fq) * CAPWAP_FLOW_COUNT / 2,
+ GFP_KERNEL);
+ if (!q_fq)
+ return -ENOMEM;
+ capwap_domain->fqs->inbound_sec_to_op_fqs.fq_base = q_fq;
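+ /* Each SEC-to-OP FQ is scheduled on the post-decryption OP channel;
+ * context_b chains the frame on to the matching core Rx FQ, while
+ * context_a carries the flow id and the DTLS decap operation code
+ * (see the bit-field comments below).
+ */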
+ for (i = 0; i < CAPWAP_FLOW_COUNT / 2; i++) {
+ q_fq[i].fqid =
+ capwap_domain->fqs->inbound_sec_to_op_fqs.fqid_base + i;
+ channel = capwap_domain->post_dec_op_port.tx_ch;
+ context_a = (u64)1 << 63;
+ /* a1v */
+ context_a |= (u64)1 << 61;
+ /* flowid for a1 */
+ context_a |= (u64)i << (32 + 4);
+ /* SpOperCode for DTLS Decap */
+ context_a |= (u64)9 << 32;
+ context_b =
+ capwap_domain->fqs->inbound_core_rx_fqs.fqid_base + i;
+ ret = capwap_fq_tx_init(&q_fq[i], channel, context_a,
+ context_b);
+ if (ret) {
+ for (j = 0; j < i; j++)
+ qman_destroy_fq(&q_fq[j], 0);
+ kfree(q_fq);
+ return ret;
+ }
+ }
+
+ /* Four Fqs from Rx port */
+ q_fq = kzalloc(sizeof(struct qman_fq) * CAPWAP_FLOW_COUNT, GFP_KERNEL);
+ if (!q_fq)
+ return -ENOMEM;
+ capwap_domain->fqs->inbound_eth_rx_fqs.fq_base = q_fq;
+ /* Only initialize the two FQs for non-DTLS flows from the Rx port
+ * to the OP here; the FQs for DTLS flows from the Rx port to SEC
+ * are initialized dynamically.
+ */
+ for (i = CAPWAP_FLOW_COUNT / 2; i < CAPWAP_FLOW_COUNT; i++) {
+ q_fq[i].fqid =
+ capwap_domain->fqs->inbound_eth_rx_fqs.fqid_base + i;
+ channel = capwap_domain->post_dec_op_port.tx_ch;
+ context_a = (u64)1 << 63;
+ /* a1v */
+ context_a |= (u64)1 << 61;
+ /* flowid for a1 */
+ context_a |= (u64)i << (32 + 4);
+ context_b =
+ capwap_domain->fqs->inbound_core_rx_fqs.fqid_base + i;
+ ret = capwap_fq_tx_init(&q_fq[i], channel, context_a,
+ context_b);
+ if (ret) {
+ for (j = 0; j < i; j++)
+ qman_destroy_fq(&q_fq[j], 0);
+ kfree(q_fq);
+ return ret;
+ }
+ }
+
+/* Outbound FQs pre-initialization */
+ /* Eight Fqs from OP tx */
+ q_fq = kzalloc(sizeof(struct qman_fq) * CAPWAP_FLOW_COUNT * 2,
+ GFP_KERNEL);
+ if (!q_fq)
+ return -ENOMEM;
+ capwap_domain->fqs->outbound_op_tx_fqs.fq_base = q_fq;
+
+ /* The upper four flows are for sending to the Tx port after header
+ * manipulation.
+ */
+ for (i = CAPWAP_FLOW_COUNT; i < CAPWAP_FLOW_COUNT * 2; i++) {
+ q_fq[i].fqid =
+ capwap_domain->fqs->outbound_op_tx_fqs.fqid_base + i;
+ channel = (uint16_t)
+ fm_get_tx_port_channel(net_priv->mac_dev->port_dev[TX]);
+ context_a = (u64)1 << 63;
+#ifdef CONFIG_FMAN_T4240
+ /* Configure the Tx queues for recycled frames, such that the
+ * buffers are released by FMan and no confirmation is sent
+ */
+#define FMAN_V3_CONTEXTA_EN_A2V 0x10000000
+#define FMAN_V3_CONTEXTA_EN_OVOM 0x02000000
+#define FMAN_V3_CONTEXTA_EN_EBD 0x80000000
+ context_a |= (((uint64_t) FMAN_V3_CONTEXTA_EN_A2V |
+ FMAN_V3_CONTEXTA_EN_OVOM) << 32) |
+ FMAN_V3_CONTEXTA_EN_EBD;
+#endif
+ context_b = 0;
+ ret = capwap_fq_tx_init(&q_fq[i], channel, context_a,
+ context_b);
+ if (ret) {
+ for (j = CAPWAP_FLOW_COUNT; j < i; j++)
+ qman_destroy_fq(&q_fq[j], 0);
+ kfree(q_fq);
+ return ret;
+ }
+ }
+ /* The lower four flows:
+ * 1 & 2 are for DTLS tunnels and are initialized dynamically when
+ * a tunnel is inserted;
+ * 3 & 4 are for non-DTLS tunnels, are sent back to the OP, and are
+ * initialized here.
+ */
+ for (i = CAPWAP_FLOW_COUNT / 2; i < CAPWAP_FLOW_COUNT; i++) {
+ q_fq[i].fqid =
+ capwap_domain->fqs->outbound_op_tx_fqs.fqid_base + i;
+ channel = capwap_domain->out_op_port.tx_ch;
+ context_a = (u64)1 << 63;
+ /* a1v */
+ context_a |= (u64)1 << 61;
+ /* flowid for a1, Upper flow for OP */
+ context_a |= (u64)(i + CAPWAP_FLOW_COUNT) << (32 + 4);
+ context_b = capwap_domain->fqs->outbound_op_tx_fqs.fqid_base +
+ i + CAPWAP_FLOW_COUNT;
+ ret = capwap_fq_tx_init(&q_fq[i], channel, context_a,
+ context_b);
+ if (ret) {
+ for (j = CAPWAP_FLOW_COUNT / 2; j < i; j++)
+ qman_destroy_fq(&q_fq[j], 0);
+ for (j = CAPWAP_FLOW_COUNT;
+ j < CAPWAP_FLOW_COUNT * 2; j++)
+ qman_destroy_fq(&q_fq[j], 0);
+ kfree(q_fq);
+ return ret;
+ }
+ }
+
+ /* Two Fqs from SEC to OP */
+ q_fq = kzalloc(sizeof(struct qman_fq) * CAPWAP_FLOW_COUNT / 2,
+ GFP_KERNEL);
+ if (!q_fq)
+ return -ENOMEM;
+ capwap_domain->fqs->outbound_sec_to_op_fqs.fq_base = q_fq;
+ for (i = 0; i < CAPWAP_FLOW_COUNT / 2; i++) {
+ q_fq[i].fqid = capwap_domain->fqs->
+ outbound_sec_to_op_fqs.fqid_base + i;
+ channel = capwap_domain->out_op_port.tx_ch;
+ context_a = (u64)1 << 63;
+ /* a1v */
+ context_a |= (u64)1 << 61;
+ /* flowid for a1, Upper flow for OP */
+ context_a |= (u64)(i + CAPWAP_FLOW_COUNT) << (32 + 4);
+ /* SpOperCode for DTLS Encap */
+ context_a |= (u64) 10 << 32;
+ context_b = capwap_domain->fqs->outbound_op_tx_fqs.fqid_base +
+ i + CAPWAP_FLOW_COUNT;
+ ret = capwap_fq_tx_init(&q_fq[i], channel, context_a,
+ context_b);
+ if (ret) {
+ for (j = 0; j < i; j++)
+ qman_destroy_fq(&q_fq[j], 0);
+ kfree(q_fq);
+ return ret;
+ }
+ }
+
+ /* Four Fqs from Core to OP port */
+ q_fq = kzalloc(sizeof(struct qman_fq) * CAPWAP_FLOW_COUNT, GFP_KERNEL);
+ if (!q_fq)
+ return -ENOMEM;
+ capwap_domain->fqs->outbound_core_tx_fqs.fq_base = q_fq;
+ for (i = 0; i < CAPWAP_FLOW_COUNT; i++) {
+ q_fq[i].fqid = capwap_domain->fqs->
+ outbound_core_tx_fqs.fqid_base + i;
+ channel = capwap_domain->out_op_port.tx_ch;
+ context_a = (u64)1 << 63;
+ /* a1v */
+ context_a |= (u64)1 << 61;
+ /* flowid for a1, Lower flow for OP*/
+ context_a |= (u64)i << (32 + 4);
+ context_b = 0;
+ ret = capwap_fq_tx_init(&q_fq[i], channel, context_a,
+ context_b);
+ if (ret) {
+ for (j = 0; j < i; j++)
+ qman_destroy_fq(&q_fq[j], 0);
+ kfree(q_fq);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
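+/* Turn kernel-space reception on or off for one tunnel: "on" initializes the
+ * corresponding core Rx FQ with its dequeue callback, "off" tears that FQ
+ * down again. The FQID in use is reported back through rx_ctl->fqid.
+ */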
+int capwap_kernel_rx_ctl(struct capwap_domain_kernel_rx_ctl *rx_ctl)
+{
+ u32 fqid;
+ uint16_t flow_index;
+ struct dpa_fq *fq_base, *d_fq;
+ struct dpa_priv_s *net_priv;
+ struct dpaa_capwap_domain *capwap_domain =
+ (struct dpaa_capwap_domain *)rx_ctl->capwap_domain_id;
+ int ret = 0;
+
+ if (capwap_domain == NULL)
+ return -EINVAL;
+
+ net_priv = netdev_priv(capwap_domain->net_dev);
+ flow_index = get_flow_index(rx_ctl->is_dtls, rx_ctl->is_control);
+
+ fqid = capwap_domain->fqs->inbound_core_rx_fqs.fqid_base + flow_index;
+
+ fq_base = (struct dpa_fq *)capwap_domain->fqs->
+ inbound_core_rx_fqs.fq_base;
+ d_fq = &fq_base[flow_index];
+
+ if (rx_ctl->on) {
+ if (d_fq->fq_base.fqid == fqid) {
+ pr_err("CAPWAP %s-%s tunnel kernel Rx is already on\n",
+ rx_ctl->is_control ? "control" : "data",
+ rx_ctl->is_dtls ? "dtls" : "non-dtls");
+ return -EINVAL;
+ }
+
+ d_fq->net_dev = capwap_domain->net_dev;
+ ret = capwap_fq_rx_init(&d_fq->fq_base, fqid, net_priv->channel,
+ rx_cbs[flow_index]);
+ if (ret) {
+ memset(d_fq, 0, sizeof(struct dpa_fq));
+ return ret;
+ }
+ } else {
+ if (!d_fq->fq_base.fqid) {
+ pr_err("CAPWAP %s-%s tunnel kernel Rx is already off\n",
+ rx_ctl->is_control ? "control" : "data",
+ rx_ctl->is_dtls ? "dtls" : "non-dtls");
+ return -EINVAL;
+ }
+ teardown_fq(&d_fq->fq_base);
+ memset(d_fq, 0, sizeof(struct dpa_fq));
+ }
+ rx_ctl->fqid = fqid;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_fq.h b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_fq.h
new file mode 100644
index 0000000..73a3f07
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_fq.h
@@ -0,0 +1,44 @@
+/* Copyright 2014 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_CAPWAP_FQ_H__
+#define __DPAA_CAPWAP_FQ_H__
+
+int capwap_fq_rx_init(struct qman_fq *fq, u32 fqid,
+ u16 channel, qman_cb_dqrr cb);
+
+
+int capwap_fq_tx_init(struct qman_fq *fq, u16 channel,
+ u64 context_a, u32 context_b);
+
+void teardown_fq(struct qman_fq *fq);
+
+#endif /* __DPAA_CAPWAP_FQ_H__ */
diff --git a/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_ioctl.c b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_ioctl.c
new file mode 100644
index 0000000..a8d9a1f
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_ioctl.c
@@ -0,0 +1,588 @@
+/* Copyright (c) 2014 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+
+#include "dpaa_capwap_ioctl.h"
+#include "dpaa_capwap_domain.h"
+
+#include <linux/fdtable.h>
+#include "lnxwrp_fm.h"
+
+#define DRV_VERSION "0.1"
+
+static int dpa_capwap_cdev_major = -1;
+static struct class *capwap_class;
+static struct device *capwap_dev;
+static struct list_head tunnel_list_head;
+
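+/* Book-keeping entry for every tunnel created through the ioctl interface;
+ * used to reject duplicate tunnels and to look them up on removal.
+ */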
+struct tunnel_info {
+ struct list_head tunnel_list;
+ bool is_control;
+ bool is_dtls;
+ enum dpaa_capwap_domain_direction dir;
+ void *tunnel_id;
+};
+
+
+struct t_device {
+ uintptr_t id; /**< the device id */
+ int fd; /**< the device file descriptor */
+ t_Handle h_UserPriv;
+ uint32_t owners;
+};
+
+
+int wrp_dpa_capwap_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+
+int wrp_dpa_capwap_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+long wrp_dpa_capwap_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long args)
+{
+ long ret = 0;
+
+ switch (cmd) {
+ case DPA_CAPWAP_IOC_DOMAIN_GET_FQIDS: {
+ struct dpaa_capwap_domain_fqs *fqs;
+
+ fqs = get_domain_fqs();
+ if (fqs == NULL)
+ return -ENODEV;
+
+ if (copy_to_user((void *)args, fqs,
+ sizeof(struct dpaa_capwap_domain_fqs))) {
+ pr_err("Could not copy DPA CAPWAP FQID base to user\n");
+ return -EINVAL;
+ }
+ break;
+ }
+ case DPA_CAPWAP_IOC_DOMAIN_INIT: {
+ struct dpaa_capwap_domain_params domain_params;
+ struct dpaa_capwap_domain *capwap_domain = NULL;
+ struct file *fm_pcd_file, *fm_port_file;
+ t_LnxWrpFmDev *fm_wrapper_dev;
+ t_LnxWrpFmPortDev *port_wrapper_dev;
+ struct t_device *dev;
+
+ /* Copy parameters from user-space */
+ if (copy_from_user(&domain_params, (void *)args,
+ sizeof(domain_params))) {
+ pr_err("Could not copy DPA CAPWAP init parameters\n");
+ return -EINVAL;
+ }
+
+ /* Translate FM_PCD file descriptor */
+ fm_pcd_file = fcheck((unsigned long)domain_params.h_fm_pcd);
+ if (!fm_pcd_file) {
+ pr_err("Could not acquire PCD handle\n");
+ return -EINVAL;
+ }
+ fm_wrapper_dev = ((t_LnxWrpFmDev *)fm_pcd_file->private_data);
+ domain_params.h_fm_pcd = (void *)fm_wrapper_dev->h_PcdDev;
+
+ /* Translate FM_Port file descriptor */
+ fm_port_file = fcheck((unsigned long)
+ domain_params.outbound_op.port_handle);
+ if (!fm_port_file) {
+ pr_err("Could not acquire FM Port handle\n");
+ return -EINVAL;
+ }
+ port_wrapper_dev = ((t_LnxWrpFmPortDev *)
+ fm_port_file->private_data);
+ domain_params.outbound_op.port_handle = (void *)
+ port_wrapper_dev->h_Dev;
+
+ /* Translate CCNode handle */
+ dev = domain_params.inbound_pre_params.h_Table;
+ domain_params.inbound_pre_params.h_Table = (void *)dev->id;
+
+ capwap_domain = (struct dpaa_capwap_domain *)
+ dpaa_capwap_domain_config(&domain_params);
+ if (!capwap_domain)
+ return -EINVAL;
+ ret = dpaa_capwap_domain_init(capwap_domain);
+ if (ret < 0)
+ return ret;
+
+ domain_params.id = capwap_domain;
+ if (copy_to_user((void *)args, &domain_params,
+ sizeof(domain_params))) {
+ pr_err("Could not copy DPA CAPWAP domain ID to user space\n");
+ return -EINVAL;
+ }
+
+ break;
+ }
+
+ case DPA_CAPWAP_IOC_DOMAIN_ADD_IN_TUNNEL: {
+ struct dpaa_capwap_tunnel *in_tunnel = NULL;
+ struct dpaa_capwap_domain *capwap_domain = NULL;
+ struct dpaa_capwap_domain_tunnel_in_params in_tunnel_params;
+ struct tunnel_info *tunnel_node, *new_tunnel;
+
+ /* Copy parameters from user-space */
+ if (copy_from_user(&in_tunnel_params, (void *)args,
+ sizeof(in_tunnel_params))) {
+ pr_err("Could not copy DPA CAPWAP Add-In-Tunnel parameters\n");
+ return -EINVAL;
+ }
+
+ capwap_domain = (struct dpaa_capwap_domain *)
+ in_tunnel_params.capwap_domain_id;
+ if (!capwap_domain)
+ return -EINVAL;
+
+ list_for_each_entry(tunnel_node, &tunnel_list_head,
+ tunnel_list) {
+ if (tunnel_node->is_dtls == in_tunnel_params.dtls &&
+ tunnel_node->is_control ==
+ in_tunnel_params.is_control &&
+ tunnel_node->dir ==
+ e_DPAA_CAPWAP_DOMAIN_DIR_INBOUND) {
+ pr_err("%s-%s inbound tunnel already exists, please remove it first\n",
+ tunnel_node->is_dtls ? "DTLS" :
+ "N-DTLS",
+ tunnel_node->is_control ?
+ "Control" : "Data");
+ return -EINVAL;
+ }
+ }
+
+ in_tunnel = dequeue_tunnel_obj(&capwap_domain->in_tunnel_list);
+ if (!in_tunnel) {
+ pr_err("You've reached the maximum number of inbound tunnels\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(in_tunnel->auth_data.auth_key,
+ in_tunnel_params.dtls_params.auth_key,
+ in_tunnel_params.dtls_params
+ .auth_key_len / 8)) {
+ pr_err("Could not copy auth key from user space\n");
+ return -EINVAL;
+ }
+ if (copy_from_user(in_tunnel->cipher_data.cipher_key,
+ in_tunnel_params.dtls_params.cipher_key,
+ in_tunnel_params.dtls_params
+ .cipher_key_len / 8)) {
+ pr_err("Could not copy cipher key from user space\n");
+ return -EINVAL;
+ }
+
+ ret = add_in_tunnel(capwap_domain, in_tunnel,
+ &in_tunnel_params);
+ if (ret < 0)
+ return ret;
+ in_tunnel_params.tunnel_id = in_tunnel;
+
+ new_tunnel = kzalloc(sizeof(struct tunnel_info), GFP_KERNEL);
+ if (new_tunnel == NULL)
+ return -ENOMEM;
+ new_tunnel->is_control = in_tunnel_params.is_control;
+ new_tunnel->is_dtls = in_tunnel_params.dtls;
+ new_tunnel->tunnel_id = in_tunnel_params.tunnel_id;
+ new_tunnel->dir = e_DPAA_CAPWAP_DOMAIN_DIR_INBOUND;
+ list_add_tail(&new_tunnel->tunnel_list, &tunnel_list_head);
+
+ if (copy_to_user((void *)args, &in_tunnel_params,
+ sizeof(in_tunnel_params))) {
+ pr_err("Could not copy DPA CAPWAP tunnel ID to user space\n");
+ return -EINVAL;
+ }
+
+
+ break;
+ }
+
+ case DPA_CAPWAP_IOC_DOMAIN_ADD_OUT_TUNNEL: {
+ struct dpaa_capwap_tunnel *out_tunnel = NULL;
+ struct dpaa_capwap_domain *capwap_domain = NULL;
+ struct dpaa_capwap_domain_tunnel_out_params out_tunnel_params;
+ struct tunnel_info *tunnel_node, *new_tunnel;
+ uint8_t *buf;
+
+ /* Copy parameters from user-space */
+ if (copy_from_user(&out_tunnel_params, (void *)args,
+ sizeof(out_tunnel_params))) {
+ pr_err("Could not copy DPA CAPWAP Add-Out-Tunnel parameters\n");
+ return -EINVAL;
+ }
+
+ if (out_tunnel_params.p_ether_header &&
+ out_tunnel_params.eth_header_size != 0) {
+ buf = kzalloc(out_tunnel_params.eth_header_size,
+ GFP_KERNEL);
+ if (buf == NULL) {
+ pr_err("No memory for ether header in %s\n",
+ __func__);
+ return -ENOMEM;
+ }
+ if (copy_from_user(buf,
+ out_tunnel_params.p_ether_header,
+ out_tunnel_params.eth_header_size)) {
+ pr_err("Could not copy ether header from user space:%s\n",
+ __func__);
+ kfree(buf);
+ return -EINVAL;
+ }
+ out_tunnel_params.p_ether_header = buf;
+ }
+ if (out_tunnel_params.p_ip_header &&
+ out_tunnel_params.ip_header_size != 0) {
+ buf = kzalloc(out_tunnel_params.ip_header_size,
+ GFP_KERNEL);
+ if (buf == NULL) {
+ pr_err("No memory for IP header in %s\n",
+ __func__);
+ kfree(out_tunnel_params.p_ether_header);
+ return -ENOMEM;
+ }
+ if (copy_from_user(buf, out_tunnel_params.p_ip_header,
+ out_tunnel_params.ip_header_size)) {
+ pr_err("Could not copy IP header from user space:%s\n",
+ __func__);
+ kfree(out_tunnel_params.p_ether_header);
+ kfree(buf);
+ return -EINVAL;
+ }
+ out_tunnel_params.p_ip_header = buf;
+ }
+ if (out_tunnel_params.p_udp_header) {
+ buf = kzalloc(UDP_HDR_SIZE, GFP_KERNEL);
+ if (buf == NULL) {
+ pr_err("No memory for UDP header in %s\n",
+ __func__);
+ kfree(out_tunnel_params.p_ether_header);
+ kfree(out_tunnel_params.p_ip_header);
+ return -ENOMEM;
+ }
+ if (copy_from_user(buf, out_tunnel_params.p_udp_header,
+ UDP_HDR_SIZE)) {
+ pr_err("Could not copy UDP header from user space:%s\n",
+ __func__);
+ kfree(out_tunnel_params.p_ether_header);
+ kfree(out_tunnel_params.p_ip_header);
+ kfree(buf);
+ return -EINVAL;
+ }
+ out_tunnel_params.p_udp_header = buf;
+ }
+ if (out_tunnel_params.p_capwap_header &&
+ out_tunnel_params.capwap_header_size != 0) {
+ buf = kzalloc(out_tunnel_params.capwap_header_size,
+ GFP_KERNEL);
+ if (buf == NULL) {
+ pr_err("No memory for CAPWAP header in %s\n",
+ __func__);
+ kfree(out_tunnel_params.p_ether_header);
+ kfree(out_tunnel_params.p_ip_header);
+ kfree(out_tunnel_params.p_udp_header);
+ return -ENOMEM;
+ }
+ if (copy_from_user(buf,
+ out_tunnel_params.p_capwap_header,
+ out_tunnel_params.capwap_header_size)) {
+ pr_err("Could not copy CAPWAP header from user space:%s\n",
+ __func__);
+ kfree(out_tunnel_params.p_ether_header);
+ kfree(out_tunnel_params.p_ip_header);
+ kfree(out_tunnel_params.p_udp_header);
+ kfree(buf);
+ return -EINVAL;
+ }
+ out_tunnel_params.p_capwap_header = buf;
+ }
+
+ capwap_domain = (struct dpaa_capwap_domain *)
+ out_tunnel_params.capwap_domain_id;
+ if (!capwap_domain) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ list_for_each_entry(tunnel_node, &tunnel_list_head,
+ tunnel_list) {
+ if (tunnel_node->is_dtls == out_tunnel_params.dtls &&
+ tunnel_node->is_control ==
+ out_tunnel_params.is_control &&
+ tunnel_node->dir ==
+ e_DPAA_CAPWAP_DOMAIN_DIR_OUTBOUND) {
+ pr_err("%s-%s outbound tunnel already exists, please remove it first\n",
+ tunnel_node->is_dtls ?
+ "DTLS" : "N-DTLS",
+ tunnel_node->is_control ?
+ "Control" : "Data");
+ ret = -EINVAL;
+ goto err_out;
+ }
+ }
+
+ out_tunnel =
+ dequeue_tunnel_obj(&capwap_domain->out_tunnel_list);
+ if (!out_tunnel) {
+ pr_err("You've reached the maximum number of outbound tunnels\n");
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ if (copy_from_user(out_tunnel->auth_data.auth_key,
+ out_tunnel_params.dtls_params.auth_key,
+ out_tunnel_params.dtls_params
+ .auth_key_len / 8)) {
+ pr_err("Could not copy auth key from user space\n");
+ ret = -EINVAL;
+ goto err_out;
+ }
+ if (copy_from_user(out_tunnel->cipher_data.cipher_key,
+ out_tunnel_params.dtls_params.cipher_key,
+ out_tunnel_params.dtls_params
+ .cipher_key_len / 8)) {
+ pr_err("Could not copy cipher key from user space\n");
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ret = add_out_tunnel(capwap_domain, out_tunnel,
+ &out_tunnel_params);
+ if (ret < 0)
+ goto err_out;
+ out_tunnel_params.tunnel_id = out_tunnel;
+
+ new_tunnel = kzalloc(sizeof(struct tunnel_info), GFP_KERNEL);
+ if (new_tunnel == NULL) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+ new_tunnel->is_control = out_tunnel_params.is_control;
+ new_tunnel->is_dtls = out_tunnel_params.dtls;
+ new_tunnel->tunnel_id = out_tunnel_params.tunnel_id;
+ new_tunnel->dir = e_DPAA_CAPWAP_DOMAIN_DIR_OUTBOUND;
+ list_add_tail(&new_tunnel->tunnel_list, &tunnel_list_head);
+
+ if (copy_to_user((void *)args, &out_tunnel_params,
+ sizeof(out_tunnel_params))) {
+ pr_err("Could not copy DPA CAPWAP tunnel ID to user space\n");
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ break;
+err_out:
+ kfree(out_tunnel_params.p_ether_header);
+ kfree(out_tunnel_params.p_ip_header);
+ kfree(out_tunnel_params.p_udp_header);
+ kfree(out_tunnel_params.p_capwap_header);
+ kfree(new_tunnel);
+ return ret;
+
+ }
+
+ case DPA_CAPWAP_IOC_DOMAIN_REMOVE_TUNNEL: {
+ struct dpaa_capwap_tunnel *tunnel = NULL;
+ struct tunnel_info *tunnel_node;
+ int is_found = 0;
+
+ /* Copy parameters from user-space */
+ if (copy_from_user(&tunnel, (void *)args, sizeof(void *))) {
+ pr_err("Could not copy DPA CAPWAP Remove-Tunnel parameters\n");
+ return -EINVAL;
+ }
+ list_for_each_entry(tunnel_node, &tunnel_list_head,
+ tunnel_list) {
+ if (tunnel_node->tunnel_id == tunnel) {
+ is_found = 1;
+ break;
+ }
+ }
+ if (is_found) {
+ dpaa_capwap_domain_remove_tunnel(tunnel);
+ list_del(&tunnel_node->tunnel_list);
+ kfree(tunnel_node);
+ } else
+ return -EINVAL;
+
+ break;
+ }
+ case DPA_CAPWAP_IOC_DOMAIN_KERNAL_RX_CTL: {
+ struct capwap_domain_kernel_rx_ctl rx_ctl;
+
+ /* Copy parameters from user-space */
+ if (copy_from_user(&rx_ctl, (void *)args,
+ sizeof(struct capwap_domain_kernel_rx_ctl))) {
+ pr_err("Could not copy DPA CAPWAP kernel Rx control parameters\n");
+ return -EINVAL;
+ }
+
+ ret = capwap_kernel_rx_ctl(&rx_ctl);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void *)args, &rx_ctl,
+ sizeof(struct capwap_domain_kernel_rx_ctl))) {
+ pr_err("Could not copy DPA CAPWAP rx fqid to user space\n");
+ return -EINVAL;
+ }
+
+ break;
+ }
+ default:
+ pr_err("Invalid DPA CAPWAP ioctl (0x%x)\n", cmd);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+long wrp_dpa_capwap_ioctl_compat(struct file *filp, unsigned int cmd,
+ unsigned long args)
+{
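+ /* The 32-bit compat path is currently a stub: no argument translation
+ * is performed and every command returns success without doing anything.
+ */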
+ long ret = 0;
+
+ return ret;
+}
+#endif
+
+static const struct file_operations dpa_capwap_fops = {
+ .owner = THIS_MODULE,
+ .open = wrp_dpa_capwap_open,
+ .read = NULL,
+ .write = NULL,
+ .unlocked_ioctl = wrp_dpa_capwap_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = wrp_dpa_capwap_ioctl_compat,
+#endif
+ .release = wrp_dpa_capwap_release
+};
+
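+/* sysfs "domain_statistic" read handler: report the CAPWAP fragmentation
+ * frame counter of every tunnel that owns a fragmentation manipulation, as
+ * returned by FM_PCD_ManipGetStatistics().
+ */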
+static ssize_t domain_show_statistic(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t bytes = 0;
+ struct tunnel_info *tunnel_node;
+ t_FmPcdManipStats manip_stats;
+ struct dpaa_capwap_tunnel *capwap_tunnel = NULL;
+ int ret;
+
+ list_for_each_entry(tunnel_node, &tunnel_list_head, tunnel_list) {
+ capwap_tunnel = (struct dpaa_capwap_tunnel *)tunnel_node->tunnel_id;
+ if (capwap_tunnel->h_capwap_frag) {
+ memset(&manip_stats, 0, sizeof(manip_stats));
+ ret = FM_PCD_ManipGetStatistics(capwap_tunnel->h_capwap_frag,
+ &manip_stats);
+ if (!ret) {
+ bytes += sprintf(buf + bytes, "%s-%s Tunnel:\n",
+ tunnel_node->is_dtls ?
+ "DTLS" : "N-DTLS",
+ tunnel_node->is_control ?
+ "Control" : "Data");
+ bytes += sprintf(buf + bytes,
+ "\tfrag-total-count: %u\n",
+ manip_stats.u.frag.u.capwapFrag.totalFrames);
+ }
+ }
+ }
+
+ return bytes;
+}
+
+static DEVICE_ATTR(domain_statistic, S_IRUGO, domain_show_statistic, NULL);
+
+static int __init wrp_dpa_capwap_init(void)
+{
+ /* Cannot initialize the wrapper twice */
+ if (dpa_capwap_cdev_major >= 0)
+ return -EBUSY;
+
+ dpa_capwap_cdev_major =
+ register_chrdev(0, DPA_CAPWAP_CDEV, &dpa_capwap_fops);
+ if (dpa_capwap_cdev_major < 0) {
+ pr_err("Could not register DPA CAPWAP character device\n");
+ return dpa_capwap_cdev_major;
+ }
+
+ capwap_class = class_create(THIS_MODULE, DPA_CAPWAP_CDEV);
+ if (IS_ERR(capwap_class)) {
+ pr_err("Cannot create DPA CAPWAP class device\n");
+ unregister_chrdev(dpa_capwap_cdev_major, DPA_CAPWAP_CDEV);
+ dpa_capwap_cdev_major = -1;
+ return PTR_ERR(capwap_class);
+ }
+
+ capwap_dev = device_create(capwap_class, NULL,
+ MKDEV(dpa_capwap_cdev_major, 0), NULL,
+ DPA_CAPWAP_CDEV);
+ if (IS_ERR(capwap_dev)) {
+ pr_err("Cannot create DPA CAPWAP device\n");
+ class_destroy(capwap_class);
+ unregister_chrdev(dpa_capwap_cdev_major, DPA_CAPWAP_CDEV);
+ dpa_capwap_cdev_major = -1;
+ return PTR_ERR(capwap_dev);
+ }
+ INIT_LIST_HEAD(&tunnel_list_head);
+
+ if (device_create_file(capwap_dev, &dev_attr_domain_statistic))
+ dev_err(capwap_dev, "Error creating sysfs file\n");
+
+ pr_info("DPAA CAPWAP Domain driver v%s\n", DRV_VERSION);
+
+ return 0;
+}
+
+
+static void __exit wrp_dpa_capwap_exit(void)
+{
+ device_destroy(capwap_class, MKDEV(dpa_capwap_cdev_major, 0));
+ class_destroy(capwap_class);
+ unregister_chrdev(dpa_capwap_cdev_major, DPA_CAPWAP_CDEV);
+ dpa_capwap_cdev_major = -1;
+}
+
+module_init(wrp_dpa_capwap_init);
+module_exit(wrp_dpa_capwap_exit);
+
+MODULE_AUTHOR("Freescale, <freescale.com>");
+MODULE_DESCRIPTION("DPA CAPWAP Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_ioctl.h b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_ioctl.h
new file mode 100644
index 0000000..070a22f
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_ioctl.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2014 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_CAPWAP_IOCTL_H__
+#define __DPAA_CAPWAP_IOCTL_H__
+
+#define DPA_CAPWAP_CDEV "fsl-capwap"
+
+#define DPA_CAPWAP_IOC_MAGIC 0xee
+
+#define DPA_CAPWAP_IOC_DOMAIN_INIT \
+ _IOWR(DPA_CAPWAP_IOC_MAGIC, 0, struct dpaa_capwap_domain_params)
+
+#define DPA_CAPWAP_IOC_DOMAIN_ADD_IN_TUNNEL \
+ _IOWR(DPA_CAPWAP_IOC_MAGIC, 1, \
+ struct dpaa_capwap_domain_tunnel_in_params)
+
+#define DPA_CAPWAP_IOC_DOMAIN_ADD_OUT_TUNNEL \
+ _IOWR(DPA_CAPWAP_IOC_MAGIC, 2, \
+ struct dpaa_capwap_domain_tunnel_out_params)
+
+#define DPA_CAPWAP_IOC_DOMAIN_GET_FQIDS \
+ _IOWR(DPA_CAPWAP_IOC_MAGIC, 3, struct dpaa_capwap_domain_fqs)
+
+#define DPA_CAPWAP_IOC_DOMAIN_REMOVE_TUNNEL \
+ _IOWR(DPA_CAPWAP_IOC_MAGIC, 4, void *)
+
+#define DPA_CAPWAP_IOC_DOMAIN_KERNAL_RX_CTL \
+ _IOWR(DPA_CAPWAP_IOC_MAGIC, 5, \
+ struct capwap_domain_kernel_rx_ctl)
+
+#endif /* __DPAA_CAPWAP_IOCTL_H__ */
diff --git a/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_op.c b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_op.c
new file mode 100644
index 0000000..3074a68
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_op.c
@@ -0,0 +1,96 @@
+/* Copyright 2014 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <compat.h>
+#include <linux/kthread.h>
+#include "lnxwrp_fm.h"
+#include "../offline_port.h"
+#include "../dpaa_eth.h"
+#include "../dpaa_eth_common.h"
+#include "dpaa_capwap.h"
+#include "dpaa_capwap_fq.h"
+#include "dpaa_capwap_domain.h"
+
+uint8_t get_capwap_bpid(struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+ return priv->dpa_bp->bpid;
+}
+
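+/* Look up the offline (OH) port matching the requested FM id and port id in
+ * the device tree and record its Tx channel in the port descriptor.
+ */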
+int op_init(struct t_Port *port, struct net_device *net_dev)
+{
+ struct device_node *dpa_oh_node;
+ bool is_found = false;
+ struct platform_device *oh_dev;
+ struct dpa_oh_config_s *oh_config;
+ int ret;
+ uint16_t channel;
+ struct dpa_fq *fq;
+ uint32_t def_fqid, err_fqid;
+ t_LnxWrpFmPortDev *fm_port_dev = NULL;
+ t_LnxWrpFmDev *fm_dev = NULL;
+ struct task_struct *kth;
+ static struct of_device_id dpa_oh_node_of_match[] = {
+ { .compatible = "fsl,dpa-oh", },
+ { /* end of list */ },
+ };
+
+
+ for_each_matching_node(dpa_oh_node, dpa_oh_node_of_match) {
+ oh_dev = of_find_device_by_node(dpa_oh_node);
+ oh_config = dev_get_drvdata(&oh_dev->dev);
+ if (oh_config == NULL)
+ continue;
+
+ fm_port_dev = (t_LnxWrpFmPortDev *)oh_config->oh_port;
+ fm_dev = fm_port_dev->h_LnxWrpFmDev;
+ if (fm_port_dev->settings.param.portId == port->port_id &&
+ fm_dev->id == port->fm_id) {
+ is_found = true;
+ port->tx_ch = fm_port_dev->txCh;
+ def_fqid = oh_config->default_fqid;
+ err_fqid = oh_config->error_fqid;
+ break;
+ }
+ }
+
+ if (!is_found) {
+ pr_err("Can't find this DPA OH port:%d,%d\n", port->fm_id,
+ port->port_id);
+ return -ENODEV;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_tunnel.c b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_tunnel.c
new file mode 100644
index 0000000..7e7c339
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/capwap/dpaa_capwap_tunnel.c
@@ -0,0 +1,493 @@
+/* Copyright 2013-2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/if_vlan.h>
+#include <linux/fsl_bman.h>
+#include <linux/poll.h>
+
+#include "dpaa_eth_common.h"
+#include "dpaa_capwap.h"
+#include "dpaa_capwap_domain.h"
+
+#define MAX_LIST_LENGTH 2000
+
+
+struct capwap_dtls_list {
+ struct qm_fd *fd;
+ struct list_head list;
+};
+
+struct capwap_tunnel_ctx {
+ bool fd_open; /* Set to true once the fd is opened */
+ u32 last_irq_count; /* Last value returned from read */
+ u32 irq_count; /* Number of irqs since last read */
+ wait_queue_head_t wait_queue; /* Waiting processes */
+ spinlock_t lock;
+ struct list_head dtls_head;
+ struct list_head dtls_tail;
+ struct qman_fq *tx_fq;
+};
+
+static struct capwap_tunnel_ctx control_dtls_ctx;
+static struct capwap_tunnel_ctx control_n_dtls_ctx;
+static struct capwap_tunnel_ctx data_dtls_ctx;
+static struct capwap_tunnel_ctx data_n_dtls_ctx;
+static struct dpaa_capwap_domain *capwap_domain;
+
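+/* Queue a copy of the received frame descriptor on the tunnel context's list
+ * and wake up any readers; when the backlog of unread frames exceeds
+ * MAX_LIST_LENGTH, the oldest entry is dropped and its buffer released first.
+ */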
+static int process_fd(struct capwap_tunnel_ctx *ctx, const struct qm_fd *fd,
+ struct net_device *net_dev)
+{
+ struct qm_fd *fd_cp;
+ u32 count;
+ struct capwap_dtls_list *new_node, *first_node;
+ unsigned long flags;
+
+
+ spin_lock_irqsave(&ctx->lock, flags);
+
+ if (ctx->irq_count > ctx->last_irq_count)
+ count = ctx->irq_count - ctx->last_irq_count;
+ else
+ count = ctx->last_irq_count - ctx->irq_count;
+
+ if (count > MAX_LIST_LENGTH) {
+ first_node = container_of((&ctx->dtls_head)->next,
+ struct capwap_dtls_list, list);
+ list_del(&first_node->list);
+ dpa_fd_release(net_dev, first_node->fd);
+ kfree(first_node->fd);
+ kfree(first_node);
+ }
+
+ new_node = kmalloc(sizeof(struct capwap_dtls_list), GFP_KERNEL);
+ if (!new_node) {
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ return -1;
+ }
+ fd_cp = kmalloc(sizeof(struct qm_fd), GFP_KERNEL);
+ if (!fd_cp) {
+ kfree(new_node);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ return -1;
+ }
+ memcpy(fd_cp, fd, sizeof(struct qm_fd));
+
+ new_node->fd = fd_cp;
+ list_add_tail(&new_node->list, &ctx->dtls_tail);
+ ctx->irq_count++;
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ wake_up_all(&ctx->wait_queue);
+ return 0;
+}
+
+enum qman_cb_dqrr_result __hot
+capwap_control_dtls_rx_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ const struct qm_fd *fd = &dq->fd;
+ struct capwap_tunnel_ctx *ctx;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ /* IRQ handler, non-migratable; safe to use __this_cpu_ptr here */
+ percpu_priv = __this_cpu_ptr(priv->percpu_priv);
+
+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
+ return qman_cb_dqrr_stop;
+
+ ctx = &control_dtls_ctx;
+
+ if (!ctx || !ctx->fd_open)
+ goto out;
+ if (!process_fd(ctx, fd, net_dev))
+ return qman_cb_dqrr_consume;
+out:
+ dpa_fd_release(net_dev, fd);
+
+ return qman_cb_dqrr_consume;
+}
+
+enum qman_cb_dqrr_result __hot
+capwap_control_n_dtls_rx_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ const struct qm_fd *fd = &dq->fd;
+ struct capwap_tunnel_ctx *ctx;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ /* IRQ handler, non-migratable; safe to use __this_cpu_ptr here */
+ percpu_priv = __this_cpu_ptr(priv->percpu_priv);
+
+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
+ return qman_cb_dqrr_stop;
+
+ ctx = &control_n_dtls_ctx;
+
+ if (!ctx || !ctx->fd_open)
+ goto out;
+ if (!process_fd(ctx, fd, net_dev))
+ return qman_cb_dqrr_consume;
+out:
+ dpa_fd_release(net_dev, fd);
+
+ return qman_cb_dqrr_consume;
+}
+
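+/* Hand an inbound data frame to the matching data tunnel context (DTLS or
+ * non-DTLS, selected by FQID). Returns 0 once the frame has been queued for
+ * a reader and non-zero if it was not consumed.
+ */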
+int upload_data_packets(u32 fqid, const struct qm_fd *fd,
+ struct net_device *net_dev)
+{
+ u32 fqid_base;
+ struct capwap_tunnel_ctx *ctx;
+
+ fqid_base = capwap_domain->fqs->inbound_core_rx_fqs.fqid_base;
+ if (fqid == (fqid_base + 1))
+ ctx = &data_dtls_ctx;
+ else
+ ctx = &data_n_dtls_ctx;
+
+ if (!ctx || !ctx->fd_open)
+ goto out;
+ if (!process_fd(ctx, fd, net_dev))
+ return 0;
+out:
+ return -1;
+}
+
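+/* Common open-time setup for a tunnel misc device: reset the counters and
+ * the pending-frame list, and bind the context to the outbound core Tx FQ
+ * selected by tunnel_id.
+ */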
+static void init_tunnel_ctx(struct capwap_tunnel_ctx *ctx, int tunnel_id,
+ struct file *filp)
+{
+ struct qman_fq *fq;
+
+ ctx->irq_count = 0;
+ ctx->last_irq_count = 0;
+ INIT_LIST_HEAD(&ctx->dtls_head);
+ list_add(&ctx->dtls_tail, &ctx->dtls_head);
+ init_waitqueue_head(&ctx->wait_queue);
+ spin_lock_init(&ctx->lock);
+ fq = (struct qman_fq *)capwap_domain->fqs->outbound_core_tx_fqs.fq_base;
+ ctx->tx_fq = &fq[tunnel_id];
+ filp->private_data = ctx;
+ ctx->fd_open = true;
+}
+
+static int capwap_control_dtls_open(struct inode *inode, struct file *filp)
+{
+ init_tunnel_ctx(&control_dtls_ctx, 0, filp);
+ return 0;
+}
+
+static int capwap_control_n_dtls_open(struct inode *inode, struct file *filp)
+{
+ init_tunnel_ctx(&control_n_dtls_ctx, 2, filp);
+ return 0;
+}
+
+static int capwap_data_dtls_open(struct inode *inode, struct file *filp)
+{
+ init_tunnel_ctx(&data_dtls_ctx, 1, filp);
+ return 0;
+}
+
+static int capwap_data_n_dtls_open(struct inode *inode, struct file *filp)
+{
+ init_tunnel_ctx(&data_n_dtls_ctx, 3, filp);
+ return 0;
+}
+
+static int capwap_tunnel_release(struct inode *inode, struct file *filp)
+{
+ struct capwap_tunnel_ctx *ctx = filp->private_data;
+ struct capwap_dtls_list *capwap_node;
+ unsigned long flags;
+
+ ctx->fd_open = false;
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ list_del(&ctx->dtls_tail);
+ list_for_each_entry(capwap_node, &ctx->dtls_head, list) {
+ dpa_fd_release(capwap_domain->net_dev, capwap_node->fd);
+ kfree(capwap_node->fd);
+ kfree(capwap_node);
+ }
+ INIT_LIST_HEAD(&ctx->dtls_head);
+ list_add(&ctx->dtls_tail, &ctx->dtls_head);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ return 0;
+}
+
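+/* read(): copy the payload of the oldest pending frame to user space
+ * (walking the S/G list when needed), release the buffer back to its pool
+ * and return the frame length, or 0 when no frame is pending.
+ */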
+static ssize_t capwap_tunnel_read(struct file *filp, char __user *buff,
+ size_t count, loff_t *offp)
+{
+ struct capwap_tunnel_ctx *ctx = filp->private_data;
+ struct qm_fd *fd;
+ dma_addr_t addr;
+ struct qm_sg_entry *sg_entry;
+ uint32_t len;
+ uint32_t final = 0;
+ void *data;
+ struct capwap_dtls_list *first_node;
+ unsigned long flags;
+ struct dpa_bp *dpa_bp;
+ struct dpa_priv_s *priv;
+
+ priv = netdev_priv(capwap_domain->net_dev);
+ dpa_bp = priv->dpa_bp;
+
+ spin_lock_irqsave(&ctx->lock, flags);
+
+ if (ctx->dtls_head.next == &ctx->dtls_tail) {
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ return 0;
+ }
+
+ first_node = container_of((&ctx->dtls_head)->next,
+ struct capwap_dtls_list, list);
+
+ fd = first_node->fd;
+
+ if (fd->format == qm_fd_sg) {/*short sg */
+ addr = qm_fd_addr(fd);
+ len = fd->length20;
+ data = phys_to_virt(addr);
+ data += fd->offset;
+ sg_entry = (struct qm_sg_entry *) data;
+ do {
+ addr = qm_sg_addr(sg_entry);
+ len = sg_entry->length;
+ final = sg_entry->final;
+ data = phys_to_virt(addr);
+ data += sg_entry->offset;
+ if (copy_to_user(buff, data, len)) {
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ return -EFAULT;
+ }
+ if (final)
+ break;
+ buff += len;
+ sg_entry++;
+ } while (1);
+ } else if (fd->format == qm_fd_contig) { /* short single */
+ addr = qm_fd_addr(fd);
+ len = fd->length20;
+ data = phys_to_virt(addr);
+ data += fd->offset;
+ if (copy_to_user(buff, data, len)) {
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ return -EFAULT;
+ }
+ }
+
+ len = fd->length20;
+ ctx->last_irq_count = ctx->irq_count;
+
+ dpa_fd_release(capwap_domain->net_dev, fd);
+
+ list_del((&ctx->dtls_head)->next);
+ kfree(first_node->fd);
+ kfree(first_node);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ return len;
+
+}
+
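+/* write(): acquire a buffer from the interface's buffer pool, copy the user
+ * payload into it and enqueue the resulting frame on the tunnel's Tx FQ,
+ * retrying while the enqueue ring is busy.
+ */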
+static ssize_t capwap_tunnel_write(struct file *filp,
+ const char __user *buf, size_t count, loff_t *off)
+{
+ struct capwap_tunnel_ctx *ctx = filp->private_data;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct dpa_priv_s *priv;
+ struct dpa_bp *dpa_bp;
+ struct qm_fd fd;
+ dma_addr_t addr;
+ void *data;
+ int err;
+ struct bm_buffer bmb;
+ int i;
+
+ priv = netdev_priv(capwap_domain->net_dev);
+ percpu_priv = __this_cpu_ptr(priv->percpu_priv);
+
+ dpa_bp = priv->dpa_bp;
+
+ if (count > dpa_bp->size)
+ return -EINVAL;
+
+ err = bman_acquire(dpa_bp->pool, &bmb, 1, 0);
+ if (unlikely(err <= 0)) {
+ percpu_priv->stats.tx_errors++;
+ if (err == 0)
+ err = -ENOMEM;
+ return err;
+ }
+
+ memset(&fd, 0, sizeof(fd));
+ fd.bpid = dpa_bp->bpid;
+
+ fd.length20 = count;
+ fd.addr_hi = bmb.hi;
+ fd.addr_lo = bmb.lo;
+ fd.offset = priv->tx_headroom + 64;
+ fd.format = qm_fd_contig;
+
+ addr = qm_fd_addr(&fd);
+ data = phys_to_virt(addr + dpa_fd_offset(&fd));
+ if (copy_from_user(data, buf, count)) {
+ bman_release(dpa_bp->pool, &bmb, 1, 0);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < 100000; i++) {
+ err = qman_enqueue(ctx->tx_fq, &fd, 0);
+ if (err != -EBUSY)
+ break;
+ }
+ if (unlikely(err < 0)) {
+ pr_warn("capwap_tunnel: transmit to dpaa error\n");
+ bman_release(dpa_bp->pool, &bmb, 1, 0);
+ return -EFAULT;
+ }
+
+ return count;
+}
+
+static unsigned int capwap_tunnel_poll(struct file *filp, poll_table *wait)
+{
+ struct capwap_tunnel_ctx *ctx = filp->private_data;
+ unsigned int ret = 0;
+
+ poll_wait(filp, &ctx->wait_queue, wait);
+
+ if (ctx->irq_count != ctx->last_irq_count)
+ ret |= POLLIN | POLLRDNORM;
+ return ret;
+}
+
+static const struct file_operations capwap_control_dtls_fops = {
+ .open = capwap_control_dtls_open,
+ .release = capwap_tunnel_release,
+ .read = capwap_tunnel_read,
+ .write = capwap_tunnel_write,
+ .poll = capwap_tunnel_poll
+};
+
+static struct miscdevice capwap_ctrl_dtls_miscdev = {
+ .name = "fsl-capwap-ctrl-dtls",
+ .fops = &capwap_control_dtls_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+};
+
+static const struct file_operations capwap_control_n_dtls_fops = {
+ .open = capwap_control_n_dtls_open,
+ .release = capwap_tunnel_release,
+ .read = capwap_tunnel_read,
+ .write = capwap_tunnel_write,
+ .poll = capwap_tunnel_poll
+};
+
+static struct miscdevice capwap_ctrl_n_dtls_miscdev = {
+ .name = "fsl-capwap-ctrl-n-dtls",
+ .fops = &capwap_control_n_dtls_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+};
+
+static const struct file_operations capwap_data_dtls_fops = {
+ .open = capwap_data_dtls_open,
+ .release = capwap_tunnel_release,
+ .read = capwap_tunnel_read,
+ .write = capwap_tunnel_write,
+ .poll = capwap_tunnel_poll
+};
+
+static struct miscdevice capwap_data_dtls_miscdev = {
+ .name = "fsl-capwap-data-dtls",
+ .fops = &capwap_data_dtls_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+};
+
+static const struct file_operations capwap_data_n_dtls_fops = {
+ .open = capwap_data_n_dtls_open,
+ .release = capwap_tunnel_release,
+ .read = capwap_tunnel_read,
+ .write = capwap_tunnel_write,
+ .poll = capwap_tunnel_poll
+};
+
+static struct miscdevice capwap_data_n_dtls_miscdev = {
+ .name = "fsl-capwap-data-n-dtls",
+ .fops = &capwap_data_n_dtls_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+};
+
+int capwap_tunnel_drv_init(struct dpaa_capwap_domain *domain)
+{
+ int ret;
+
+ memset(&control_dtls_ctx, 0, sizeof(struct capwap_tunnel_ctx));
+ memset(&control_n_dtls_ctx, 0, sizeof(struct capwap_tunnel_ctx));
+ memset(&data_dtls_ctx, 0, sizeof(struct capwap_tunnel_ctx));
+ memset(&data_n_dtls_ctx, 0, sizeof(struct capwap_tunnel_ctx));
+ capwap_domain = domain;
+
+ pr_info("Freescale CAPWAP Control Packet Tunnel Interface driver\n");
+ ret = misc_register(&capwap_ctrl_dtls_miscdev);
+ if (ret)
+ pr_err("fsl-capwap-ctrl-dtls: failed to register misc device\n");
+ ret = misc_register(&capwap_ctrl_n_dtls_miscdev);
+ if (ret)
+ pr_err("fsl-capwap-ctrl-n-dtls: failed to register misc device\n");
+ ret = misc_register(&capwap_data_dtls_miscdev);
+ if (ret)
+ pr_err("fsl-capwap-data-dtls: failed to register misc device\n");
+ ret = misc_register(&capwap_data_n_dtls_miscdev);
+ if (ret)
+ pr_err("fsl-capwap-data-n-dtls: failed to register misc device\n");
+ return ret;
+}
+
+void capwap_tunnel_drv_exit(void)
+{
+ capwap_domain = NULL;
+ misc_deregister(&capwap_ctrl_dtls_miscdev);
+ misc_deregister(&capwap_ctrl_n_dtls_miscdev);
+ misc_deregister(&capwap_data_dtls_miscdev);
+ misc_deregister(&capwap_data_n_dtls_miscdev);
+}
diff --git a/drivers/net/ethernet/freescale/dpa/capwap/fsl_capwap_br.h b/drivers/net/ethernet/freescale/dpa/capwap/fsl_capwap_br.h
new file mode 100644
index 0000000..7c97e4b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/capwap/fsl_capwap_br.h
@@ -0,0 +1,53 @@
+/* Copyright 2013-2014 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef FSL_CAPWAP_BR_H
+#define FSL_CAPWAP_BR_H
+
+#define FSLBR_IOCTL_MAGIC 'b'
+
+/* ioctl() commands */
+
+#define FSLBR_IOCTL_IF_ADD \
+ _IOWR(FSLBR_IOCTL_MAGIC, 0x01, int)
+
+#define FSLBR_IOCTL_IF_DEL \
+ _IOWR(FSLBR_IOCTL_MAGIC, 0x02, int)
+
+#define FSLBR_IOCTL_IF_LIST \
+ _IOWR(FSLBR_IOCTL_MAGIC, 0x03, int)
+
+#define FSLBR_IOCTL_SET_ENCRYPT \
+ _IOWR(FSLBR_IOCTL_MAGIC, 0x04, int)
+
+#define MAX_IF_COUNT 3
+
+#endif /* FSL_CAPWAP_BR_H */