From 482a4d07104c4bfabef653eb623aa4c7c307459a Mon Sep 17 00:00:00 2001 From: Geoff Thorpe Date: Wed, 13 Mar 2013 04:08:11 -0500 Subject: fsl_qbman: Add drivers for the Freescale DPAA Q/BMan Signed-off-by: Geoff Thorpe Signed-off-by: Bharat Bhushan Signed-off-by: Bogdan Hamciuc Signed-off-by: Hai-Ying Wang Signed-off-by: Jeffrey Ladouceur Signed-off-by: Jia-Fei Pan Signed-off-by: Kumar Gala Signed-off-by: Priyanka Jain Signed-off-by: Vakul Garg Signed-off-by: Emil Medve diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 329bdb4..f48a9cc 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -142,4 +142,6 @@ source "drivers/staging/sb105x/Kconfig" source "drivers/staging/fwserial/Kconfig" +source "drivers/staging/fsl_qbman/Kconfig" + endif # STAGING diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index c7ec486..4fc591b 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -63,3 +63,4 @@ obj-$(CONFIG_DRM_IMX) += imx-drm/ obj-$(CONFIG_DGRP) += dgrp/ obj-$(CONFIG_SB105X) += sb105x/ obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/ +obj-$(CONFIG_FSL_DPA) += fsl_qbman/ diff --git a/drivers/staging/fsl_qbman/Kconfig b/drivers/staging/fsl_qbman/Kconfig new file mode 100644 index 0000000..d5adc5a --- /dev/null +++ b/drivers/staging/fsl_qbman/Kconfig @@ -0,0 +1,223 @@ +config FSL_DPA + bool "Freescale Datapath Queue and Buffer management" + depends on HAS_FSL_QBMAN + default y + select FSL_QMAN_FQ_LOOKUP if PPC64 + +menu "Freescale Datapath QMan/BMan options" + depends on FSL_DPA + +config FSL_DPA_CHECKING + bool "additional driver checking" + default n + ---help--- + Compiles in additional checks to sanity-check the drivers and any + use of it by other code. Not recommended for performance. + +config FSL_DPA_CAN_WAIT + bool + default y + +config FSL_DPA_CAN_WAIT_SYNC + bool + default y + +config FSL_DPA_PIRQ_FAST + bool + default y + +config FSL_DPA_PIRQ_SLOW + bool + default y + +config FSL_DPA_PORTAL_SHARE + bool + default y + +config FSL_BMAN + bool "Freescale Buffer Manager (BMan) support" + default y + +if FSL_BMAN + +config FSL_BMAN_CONFIG + bool "BMan device management" + default y + ---help--- + If this linux image is running natively, you need this option. If this + linux image is running as a guest OS under the hypervisor, only one + guest OS ("the control plane") needs this option. + +config FSL_BMAN_TEST + tristate "BMan self-tests" + default n + ---help--- + This option compiles self-test code for BMan. + +config FSL_BMAN_TEST_HIGH + bool "BMan high-level self-test" + depends on FSL_BMAN_TEST + default y + ---help--- + This requires the presence of cpu-affine portals, and performs + high-level API testing with them (whichever portal(s) are affine to + the cpu(s) the test executes on). + +config FSL_BMAN_TEST_THRESH + bool "BMan threshold test" + depends on FSL_BMAN_TEST + default y + ---help--- + Multi-threaded (SMP) test of BMan pool depletion. A pool is seeded + before multiple threads (one per cpu) create pool objects to track + depletion state changes. The pool is then drained to empty by a + "drainer" thread, and the other threads that they observe exactly + the depletion state changes that are expected. + +config FSL_BMAN_DEBUGFS + tristate "BMan debugfs interface" + depends on DEBUG_FS + default y + ---help--- + This option compiles debugfs code for BMan. 
+ +endif # FSL_BMAN + +config FSL_QMAN + bool "Freescale Queue Manager (QMan) support" + default y + +if FSL_QMAN + +config FSL_QMAN_BUG_AND_FEATURE_REV1 + bool "workarounds for errata and missing features in p4080 rev1" + default y + ---help--- + If this option is selected, the driver will be compiled with + workarounds for errata as well as feature limitations (relative to + more recent parts) of p4080 rev1. On unaffected revisions, this + support incurs only a negligable overhead, typically only a couple of + instructions per non-fast-path operation (the fast-path operations are + unaffected). + + If in doubt, say Y. + +config FSL_QMAN_POLL_LIMIT + int + default 32 + +config FSL_QMAN_CONFIG + bool "QMan device management" + default y + ---help--- + If this linux image is running natively, you need this option. If this + linux image is running as a guest OS under the hypervisor, only one + guest OS ("the control plane") needs this option. + +config FSL_QMAN_TEST + tristate "QMan self-tests" + default n + ---help--- + This option compiles self-test code for QMan. + +config FSL_QMAN_TEST_STASH_POTATO + bool "QMan 'hot potato' data-stashing self-test" + depends on FSL_QMAN_TEST + default y + ---help--- + This performs a "hot potato" style test enqueuing/dequeuing a frame + across a series of FQs scheduled to different portals (and cpus), with + DQRR, data and context stashing always on. + +config FSL_QMAN_TEST_HIGH + bool "QMan high-level self-test" + depends on FSL_QMAN_TEST + default y + ---help--- + This requires the presence of cpu-affine portals, and performs + high-level API testing with them (whichever portal(s) are affine to + the cpu(s) the test executes on). + +config FSL_QMAN_TEST_ERRATA + bool "QMan errata-handling self-test" + depends on FSL_QMAN_TEST + default y + ---help--- + This requires the presence of cpu-affine portals, and performs + testing that handling for known hardware-errata is correct. + +config FSL_QMAN_DEBUGFS + tristate "QMan debugfs interface" + depends on DEBUG_FS + default y + ---help--- + This option compiles debugfs code for QMan. + +# H/w settings that can be hard-coded for now. +config FSL_QMAN_FQD_SZ + int "size of Frame Queue Descriptor region" + default 10 + ---help--- + This is the size of the FQD region defined as: PAGE_SIZE * (2^value) + ex: 10 => PAGE_SIZE * (2^10) + Note: Default device-trees now require minimum Kconfig setting of 10. + +config FSL_QMAN_PFDR_SZ + int "size of the PFDR pool" + default 13 + ---help--- + This is the size of the PFDR pool defined as: PAGE_SIZE * (2^value) + ex: 13 => PAGE_SIZE * (2^13) + +# Corenet initiator settings. Stash request queues are 4-deep to match cores' +# ability to snart. Stash priority is 3, other priorities are 2. 
+config FSL_QMAN_CI_SCHED_CFG_SRCCIV + int + depends on FSL_QMAN_CONFIG + default 4 +config FSL_QMAN_CI_SCHED_CFG_SRQ_W + int + depends on FSL_QMAN_CONFIG + default 3 +config FSL_QMAN_CI_SCHED_CFG_RW_W + int + depends on FSL_QMAN_CONFIG + default 2 +config FSL_QMAN_CI_SCHED_CFG_BMAN_W + int + depends on FSL_QMAN_CONFIG + default 2 + +# portal interrupt settings +config FSL_QMAN_PIRQ_DQRR_ITHRESH + int + default 12 +config FSL_QMAN_PIRQ_MR_ITHRESH + int + default 4 +config FSL_QMAN_PIRQ_IPERIOD + int + default 100 + +# 64 bit kernel support +config FSL_QMAN_FQ_LOOKUP + bool + default n + +config QMAN_CEETM_UPDATE_PERIOD + int "Token update period for shaping, in nanoseconds" + default 1000 + ---help--- + Traffic shaping works by performing token calculations (using + credits) on shaper instances periodically. This update period + sets the granularity for how often those token rate credit + updates are performed, and thus determines the accuracy and + range of traffic rates that can be configured by users. The + reference manual recommends a 1 microsecond period as providing + a good balance between granularity and range. + + Unless you know what you are doing, leave this value at its default. + +endif # FSL_QMAN + +endmenu diff --git a/drivers/staging/fsl_qbman/Makefile b/drivers/staging/fsl_qbman/Makefile new file mode 100644 index 0000000..7e385aa --- /dev/null +++ b/drivers/staging/fsl_qbman/Makefile @@ -0,0 +1,22 @@ +# Common +obj-$(CONFIG_FSL_DPA) += dpa_alloc.o + +# Bman +obj-$(CONFIG_FSL_BMAN) += bman_high.o +obj-$(CONFIG_FSL_BMAN_CONFIG) += bman_config.o bman_driver.o +obj-$(CONFIG_FSL_BMAN_TEST) += bman_tester.o +obj-$(CONFIG_FSL_BMAN_DEBUGFS) += bman_debugfs_interface.o +bman_tester-y = bman_test.o +bman_tester-$(CONFIG_FSL_BMAN_TEST_HIGH) += bman_test_high.o +bman_tester-$(CONFIG_FSL_BMAN_TEST_THRESH) += bman_test_thresh.o +bman_debugfs_interface-y = bman_debugfs.o + +# Qman +obj-$(CONFIG_FSL_QMAN) += qman_high.o qman_utility.o +obj-$(CONFIG_FSL_QMAN_CONFIG) += qman_config.o qman_driver.o +obj-$(CONFIG_FSL_QMAN_TEST) += qman_tester.o +qman_tester-y = qman_test.o qman_test_hotpotato.o \ + qman_test_high.o +qman_tester-$(CONFIG_FSL_QMAN_TEST_ERRATA) += qman_test_errata.o +obj-$(CONFIG_FSL_QMAN_DEBUGFS) += qman_debugfs_interface.o +qman_debugfs_interface-y = qman_debugfs.o diff --git a/drivers/staging/fsl_qbman/bman_config.c b/drivers/staging/fsl_qbman/bman_config.c new file mode 100644 index 0000000..4ea7418 --- /dev/null +++ b/drivers/staging/fsl_qbman/bman_config.c @@ -0,0 +1,718 @@ +/* Copyright (c) 2009-2012 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CONFIG_SMP +#include /* get_hard_smp_processor_id() */ +#endif + +#include +#include "bman_private.h" + +/* Last updated for v00.79 of the BG */ + +struct bman; + +/* Register offsets */ +#define REG_POOL_SWDET(n) (0x0000 + ((n) * 0x04)) +#define REG_POOL_HWDET(n) (0x0100 + ((n) * 0x04)) +#define REG_POOL_SWDXT(n) (0x0200 + ((n) * 0x04)) +#define REG_POOL_HWDXT(n) (0x0300 + ((n) * 0x04)) +#define REG_POOL_CONTENT(n) (0x0600 + ((n) * 0x04)) +#define REG_FBPR_FPC 0x0800 +#define REG_ECSR 0x0a00 +#define REG_ECIR 0x0a04 +#define REG_EADR 0x0a08 +#define REG_EDATA(n) (0x0a10 + ((n) * 0x04)) +#define REG_SBEC(n) (0x0a80 + ((n) * 0x04)) +#define REG_IP_REV_1 0x0bf8 +#define REG_IP_REV_2 0x0bfc +#define REG_FBPR_BARE 0x0c00 +#define REG_FBPR_BAR 0x0c04 +#define REG_FBPR_AR 0x0c10 +#define REG_SRCIDR 0x0d04 +#define REG_LIODNR 0x0d08 +#define REG_ERR_ISR 0x0e00 /* + "enum bm_isr_reg" */ + +/* Used by all error interrupt registers except 'inhibit' */ +#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */ +#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */ +#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */ +#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */ +#define BM_EIRQ_BSCN 0x00000001 /* pool State Change Notification */ + +/* BMAN_ECIR valid error bit */ +#define PORTAL_ECSR_ERR (BM_EIRQ_IVCI) + +union bman_ecir { + u32 ecir_raw; + struct { + u32 __reserved1:4; + u32 portal_num:4; + u32 __reserved2:12; + u32 numb:4; + u32 __reserved3:2; + u32 pid:6; + } __packed info; +}; + +union bman_eadr { + u32 eadr_raw; + struct { + u32 __reserved1:5; + u32 memid:3; + u32 __reserved2:14; + u32 eadr:10; + } __packed info; +}; + +struct bman_hwerr_txt { + u32 mask; + const char *txt; +}; + +#define BMAN_HWE_TXT(a, b) { .mask = BM_EIRQ_##a, .txt = b } + +static const struct bman_hwerr_txt bman_hwerr_txts[] = { + BMAN_HWE_TXT(IVCI, "Invalid Command Verb"), + BMAN_HWE_TXT(FLWI, "FBPR Low Watermark"), + BMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"), + BMAN_HWE_TXT(SBEI, "Single-bit ECC Error"), + BMAN_HWE_TXT(BSCN, "Pool State Change Notification"), +}; +#define BMAN_HWE_COUNT (sizeof(bman_hwerr_txts)/sizeof(struct bman_hwerr_txt)) + +struct bman_error_info_mdata { + u16 addr_mask; + u16 bits; + const char *txt; +}; + +#define BMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c} +static const struct bman_error_info_mdata error_mdata[] = { + BMAN_ERR_MDATA(0x03FF, 192, "Stockpile memory"), + BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 1"), + 
BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 2"), +}; +#define BMAN_ERR_MDATA_COUNT \ + (sizeof(error_mdata)/sizeof(struct bman_error_info_mdata)) + +/* Add this in Kconfig */ +#define BMAN_ERRS_TO_UNENABLE (BM_EIRQ_FLWI) + +/** + * bm_err_isr__ - Manipulate global interrupt registers + * @v: for accessors that write values, this is the 32-bit value + * + * Manipulates BMAN_ERR_ISR, BMAN_ERR_IER, BMAN_ERR_ISDR, BMAN_ERR_IIR. All + * manipulations except bm_err_isr_[un]inhibit() use 32-bit masks composed of + * the BM_EIRQ_*** definitions. Note that "bm_err_isr_enable_write" means + * "write the enable register" rather than "enable the write register"! + */ +#define bm_err_isr_status_read(bm) __bm_err_isr_read(bm, bm_isr_status) +#define bm_err_isr_status_clear(bm, m) __bm_err_isr_write(bm, bm_isr_status,m) +#define bm_err_isr_enable_read(bm) __bm_err_isr_read(bm, bm_isr_enable) +#define bm_err_isr_enable_write(bm, v) __bm_err_isr_write(bm, bm_isr_enable,v) +#define bm_err_isr_disable_read(bm) __bm_err_isr_read(bm, bm_isr_disable) +#define bm_err_isr_disable_write(bm, v) __bm_err_isr_write(bm, bm_isr_disable,v) +#define bm_err_isr_inhibit(bm) __bm_err_isr_write(bm, bm_isr_inhibit,1) +#define bm_err_isr_uninhibit(bm) __bm_err_isr_write(bm, bm_isr_inhibit,0) + +/* + * TODO: unimplemented registers + * + * BMAN_POOLk_SDCNT, BMAN_POOLk_HDCNT, BMAN_FULT, + * BMAN_VLDPL, BMAN_EECC, BMAN_SBET, BMAN_EINJ + */ + +/* Encapsulate "struct bman *" as a cast of the register space address. */ + +static struct bman *bm_create(void *regs) +{ + return (struct bman *)regs; +} + +static inline u32 __bm_in(struct bman *bm, u32 offset) +{ + return in_be32((void *)bm + offset); +} +static inline void __bm_out(struct bman *bm, u32 offset, u32 val) +{ + out_be32((void *)bm + offset, val); +} +#define bm_in(reg) __bm_in(bm, REG_##reg) +#define bm_out(reg, val) __bm_out(bm, REG_##reg, val) + +static u32 __bm_err_isr_read(struct bman *bm, enum bm_isr_reg n) +{ + return __bm_in(bm, REG_ERR_ISR + (n << 2)); +} + +static void __bm_err_isr_write(struct bman *bm, enum bm_isr_reg n, u32 val) +{ + __bm_out(bm, REG_ERR_ISR + (n << 2), val); +} + +#if 0 +static void bm_get_details(struct bman *bm, u8 *int_options, u8 *errata, + u8 *conf_options) +{ + u32 v = bm_in(IP_REV_1); + *int_options = (v >> 16) & 0xff; + *errata = (v >> 8) & 0xff; + *conf_options = v & 0xff; +} + +static u8 bm_get_corenet_sourceid(struct bman *bm) +{ + return bm_in(SRCIDR); +} + +static void bm_set_liodn(struct bman *bm, u16 liodn) +{ + bm_out(LIODNR, liodn & 0xfff); +} + +#endif + +static void bm_get_version(struct bman *bm, u16 *id, u8 *major, u8 *minor) +{ + u32 v = bm_in(IP_REV_1); + *id = (v >> 16); + *major = (v >> 8) & 0xff; + *minor = v & 0xff; +} + +static u32 __generate_thresh(u32 val, int roundup) +{ + u32 e = 0; /* co-efficient, exponent */ + int oddbit = 0; + while(val > 0xff) { + oddbit = val & 1; + val >>= 1; + e++; + if(roundup && oddbit) + val++; + } + DPA_ASSERT(e < 0x10); + return (val | (e << 8)); +} + +static void bm_set_pool(struct bman *bm, u8 pool, u32 swdet, u32 swdxt, + u32 hwdet, u32 hwdxt) +{ + DPA_ASSERT(pool < bman_pool_max); + bm_out(POOL_SWDET(pool), __generate_thresh(swdet, 0)); + bm_out(POOL_SWDXT(pool), __generate_thresh(swdxt, 1)); + bm_out(POOL_HWDET(pool), __generate_thresh(hwdet, 0)); + bm_out(POOL_HWDXT(pool), __generate_thresh(hwdxt, 1)); +} + +static void bm_set_memory(struct bman *bm, u64 ba, int prio, u32 size) +{ + u32 exp = ilog2(size); + /* choke if size isn't within range */ + DPA_ASSERT((size 
>= 4096) && (size <= 1073741824) && + is_power_of_2(size)); + /* choke if '[e]ba' has lower-alignment than 'size' */ + DPA_ASSERT(!(ba & (size - 1))); + bm_out(FBPR_BARE, upper_32_bits(ba)); + bm_out(FBPR_BAR, lower_32_bits(ba)); + bm_out(FBPR_AR, (prio ? 0x40000000 : 0) | (exp - 1)); +} + +/*****************/ +/* Config driver */ +/*****************/ + +/* TODO: Kconfig these? */ +#define DEFAULT_FBPR_SZ (PAGE_SIZE << 12) + +/* We support only one of these. */ +static struct bman *bm; +static struct device_node *bm_node; + +/* And this state belongs to 'bm'. It is set during fsl_bman_init(), but used + * during bman_init_ccsr(). */ +static dma_addr_t fbpr_a; +static size_t fbpr_sz = DEFAULT_FBPR_SZ; + +/* Parse the property to extract the memory location and size and + * memblock_reserve() it. If it isn't supplied, memblock_alloc() the default + * size. Also flush this memory range from data cache so that BMAN originated + * transactions for this memory region could be marked non-coherent. + */ +static __init int parse_mem_property(struct device_node *node, const char *name, + dma_addr_t *addr, size_t *sz, int zero) +{ + const u32 *pint; + int ret; + unsigned long vaddr; + + pint = of_get_property(node, name, &ret); + if (!pint || (ret != 16)) { + pr_info("No %s property '%s', using memblock_alloc(%016zx)\n", + node->full_name, name, *sz); + *addr = memblock_alloc(*sz, *sz); + vaddr = (unsigned long)phys_to_virt(*addr); + if (zero) + memset((void *)vaddr, 0, *sz); + flush_dcache_range(vaddr, vaddr + *sz); + return 0; + } + pr_info("Using %s property '%s'\n", node->full_name, name); + /* If using a "zero-pma", don't try to zero it, even if you asked */ + if (zero && of_find_property(node, "zero-pma", &ret)) { + pr_info(" it's a 'zero-pma', not zeroing from s/w\n"); + zero = 0; + } + *addr = ((u64)pint[0] << 32) | (u64)pint[1]; + *sz = ((u64)pint[2] << 32) | (u64)pint[3]; + /* Keep things simple, it's either all in the DRAM range or it's all + * outside. 
*/ + if (*addr < memblock_end_of_DRAM()) { + BUG_ON((u64)*addr + (u64)*sz > memblock_end_of_DRAM()); + if (memblock_reserve(*addr, *sz) < 0) { + pr_err("Failed to reserve %s\n", name); + return -ENOMEM; + } + vaddr = (unsigned long)phys_to_virt(*addr); + if (zero) + memset((void *)vaddr, 0, *sz); + flush_dcache_range(vaddr, vaddr + *sz); + } else if (zero) { + /* map as cacheable, non-guarded */ + void *tmpp = ioremap_prot(*addr, *sz, 0); + memset(tmpp, 0, *sz); + vaddr = (unsigned long)tmpp; + flush_dcache_range(vaddr, vaddr + *sz); + iounmap(tmpp); + } + return 0; +} + +static int __init fsl_bman_init(struct device_node *node) +{ + struct resource res; + u32 __iomem *regs; + const char *s; + int ret, standby = 0; + u16 id; + u8 major, minor; + + ret = of_address_to_resource(node, 0, &res); + if (ret) { + pr_err("Can't get %s property 'reg'\n", + node->full_name); + return ret; + } + s = of_get_property(node, "fsl,hv-claimable", &ret); + if (s && !strcmp(s, "standby")) + standby = 1; + if (!standby) { + ret = parse_mem_property(node, "fsl,bman-fbpr", + &fbpr_a, &fbpr_sz, 0); + BUG_ON(ret); + } + /* Global configuration */ + regs = ioremap(res.start, res.end - res.start + 1); + bm = bm_create(regs); + BUG_ON(!bm); + bm_node = node; + bm_get_version(bm, &id, &major, &minor); + pr_info("Bman ver:%04x,%02x,%02x\n", id, major, minor); + if ((major == 1) && (minor == 0)) { + bman_ip_rev = BMAN_REV10; + bman_pool_max = 64; + } else if ((major == 2) && (minor == 0)) { + bman_ip_rev = BMAN_REV20; + bman_pool_max = 8; + } else if ((major == 2) && (minor == 1)) { + bman_ip_rev = BMAN_REV21; + bman_pool_max = 64; + } else { + pr_warning("unknown Bman version, default to rev1.0\n"); + } + + if (standby) { + pr_info(" -> in standby mode\n"); + return 0; + } + return 0; +} + +int bman_have_ccsr(void) +{ + return (bm ? 
1 : 0); +} + +int bm_pool_set(u32 bpid, const u32 *thresholds) +{ + if (!bm) + return -ENODEV; + bm_set_pool(bm, bpid, thresholds[0], thresholds[1], + thresholds[2], thresholds[3]); + return 0; +} +EXPORT_SYMBOL(bm_pool_set); + +__init void bman_init_early(void) +{ + struct device_node *dn; + int ret; + + for_each_compatible_node(dn, NULL, "fsl,bman") { + if (bm) + pr_err("%s: only one 'fsl,bman' allowed\n", + dn->full_name); + else { + if (!of_device_is_available(dn)) + continue; + + ret = fsl_bman_init(dn); + BUG_ON(ret); + } + } +} + +static void log_edata_bits(u32 bit_count) +{ + u32 i, j, mask = 0xffffffff; + + pr_warning("Bman ErrInt, EDATA:\n"); + i = bit_count/32; + if (bit_count%32) { + i++; + mask = ~(mask << bit_count%32); + } + j = 16-i; + pr_warning(" 0x%08x\n", bm_in(EDATA(j)) & mask); + j++; + for (; j < 16; j++) + pr_warning(" 0x%08x\n", bm_in(EDATA(j))); +} + +static void log_additional_error_info(u32 isr_val, u32 ecsr_val) +{ + union bman_ecir ecir_val; + union bman_eadr eadr_val; + + ecir_val.ecir_raw = bm_in(ECIR); + /* Is portal info valid */ + if (ecsr_val & PORTAL_ECSR_ERR) { + pr_warning("Bman ErrInt: SWP id %d, numb %d, pid %d\n", + ecir_val.info.portal_num, ecir_val.info.numb, + ecir_val.info.pid); + } + if (ecsr_val & (BM_EIRQ_SBEI|BM_EIRQ_MBEI)) { + eadr_val.eadr_raw = bm_in(EADR); + pr_warning("Bman ErrInt: EADR Memory: %s, 0x%x\n", + error_mdata[eadr_val.info.memid].txt, + error_mdata[eadr_val.info.memid].addr_mask + & eadr_val.info.eadr); + log_edata_bits(error_mdata[eadr_val.info.memid].bits); + } +} + +/* Bman interrupt handler */ +static irqreturn_t bman_isr(int irq, void *ptr) +{ + u32 isr_val, ier_val, ecsr_val, isr_mask, i; + + ier_val = bm_err_isr_enable_read(bm); + isr_val = bm_err_isr_status_read(bm); + ecsr_val = bm_in(ECSR); + isr_mask = isr_val & ier_val; + + if (!isr_mask) + return IRQ_NONE; + for (i = 0; i < BMAN_HWE_COUNT; i++) { + if (bman_hwerr_txts[i].mask & isr_mask) { + pr_warning("Bman ErrInt: %s\n", bman_hwerr_txts[i].txt); + if (bman_hwerr_txts[i].mask & ecsr_val) { + log_additional_error_info(isr_mask, ecsr_val); + /* Re-arm error capture registers */ + bm_out(ECSR, ecsr_val); + } + if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_UNENABLE) { + pr_devel("Bman un-enabling error 0x%x\n", + bman_hwerr_txts[i].mask); + ier_val &= ~bman_hwerr_txts[i].mask; + bm_err_isr_enable_write(bm, ier_val); + } + } + } + bm_err_isr_status_clear(bm, isr_val); + return IRQ_HANDLED; +} + +static int __bind_irq(void) +{ + int ret, err_irq; + + err_irq = of_irq_to_resource(bm_node, 0, NULL); + if (err_irq == NO_IRQ) { + pr_info("Can't get %s property '%s'\n", bm_node->full_name, + "interrupts"); + return -ENODEV; + } + ret = request_irq(err_irq, bman_isr, IRQF_SHARED, "bman-err", bm_node); + if (ret) { + pr_err("request_irq() failed %d for '%s'\n", ret, + bm_node->full_name); + return -ENODEV; + } + /* Disable Buffer Pool State Change */ + bm_err_isr_disable_write(bm, BM_EIRQ_BSCN); + /* Write-to-clear any stale bits, (eg. starvation being asserted prior + * to resource allocation during driver init). 
*/ + bm_err_isr_status_clear(bm, 0xffffffff); + /* Enable Error Interrupts */ + bm_err_isr_enable_write(bm, 0xffffffff); + return 0; +} + +int bman_init_ccsr(struct device_node *node) +{ + int ret; + if (!bman_have_ccsr()) + return 0; + if (node != bm_node) + return -EINVAL; + /* FBPR memory */ + bm_set_memory(bm, fbpr_a, 0, fbpr_sz); + ret = __bind_irq(); + if (ret) + return ret; + return 0; +} + +u32 bm_pool_free_buffers(u32 bpid) +{ + return bm_in(POOL_CONTENT(bpid)); +} + +#ifdef CONFIG_SYSFS + +#define DRV_NAME "fsl-bman" + +static ssize_t show_fbpr_fpc(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(FBPR_FPC)); +}; + +static ssize_t show_pool_count(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + u32 data; + int i; + + if (!sscanf(dev_attr->attr.name, "%d", &i)) + return -EINVAL; + data = bm_in(POOL_CONTENT(i)); + return snprintf(buf, PAGE_SIZE, "%d\n", data); +}; + +static ssize_t show_err_isr(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "0x%08x\n", bm_in(ERR_ISR)); +}; + +static ssize_t show_sbec(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + int i; + + if (!sscanf(dev_attr->attr.name, "sbec_%d", &i)) + return -EINVAL; + return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(SBEC(i))); +}; + +static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL); +static DEVICE_ATTR(fbpr_fpc, S_IRUSR, show_fbpr_fpc, NULL); + +/* Didn't use DEVICE_ATTR as 64 of this would be required. + * Initialize them when needed. */ +static char *name_attrs_pool_count; /* "xx" + null-terminator */ +static struct device_attribute *dev_attr_buffer_pool_count; + +static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL); + +static struct attribute *bman_dev_attributes[] = { + &dev_attr_fbpr_fpc.attr, + &dev_attr_err_isr.attr, + NULL +}; + +static struct attribute *bman_dev_ecr_attributes[] = { + &dev_attr_sbec_0.attr, + &dev_attr_sbec_1.attr, + NULL +}; + +static struct attribute **bman_dev_pool_count_attributes; + + +/* root level */ +static const struct attribute_group bman_dev_attr_grp = { + .name = NULL, + .attrs = bman_dev_attributes +}; +static const struct attribute_group bman_dev_ecr_grp = { + .name = "error_capture", + .attrs = bman_dev_ecr_attributes +}; +static struct attribute_group bman_dev_pool_countent_grp = { + .name = "pool_count", +}; + +static int of_fsl_bman_remove(struct platform_device *ofdev) +{ + sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp); + return 0; +}; + +static int __devinit of_fsl_bman_probe(struct platform_device *ofdev) +{ + int ret, i; + + ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_attr_grp); + if (ret) + goto done; + ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_ecr_grp); + if (ret) + goto del_group_0; + + name_attrs_pool_count = kmalloc(sizeof(char) * bman_pool_max * 3, + GFP_KERNEL); + if (!name_attrs_pool_count) { + pr_err("Can't alloc name_attrs_pool_count\n"); + goto del_group_1; + } + + dev_attr_buffer_pool_count = kmalloc(sizeof(struct device_attribute) * + bman_pool_max, GFP_KERNEL); + if (!dev_attr_buffer_pool_count) { + pr_err("Can't alloc dev_attr-buffer_pool_count\n"); + goto del_group_2; + } + + bman_dev_pool_count_attributes = kmalloc(sizeof(struct attribute *) * + (bman_pool_max + 1), GFP_KERNEL); + if (!bman_dev_pool_count_attributes) { + pr_err("can't alloc bman_dev_pool_count_attributes\n"); + goto del_group_3; + 
} + + for (i = 0; i < (bman_pool_max + 1); i++) { + bman_dev_pool_count_attributes[i] = + kmalloc(sizeof(struct attribute), GFP_KERNEL); + if (!bman_dev_pool_count_attributes[i]) { + pr_err("cannot alloc for each" + " bman_dev_pool_count_attributes\n"); + goto del_group_3; + } + } + + for (i = 0; i < bman_pool_max; i++) { + ret = scnprintf((name_attrs_pool_count + i * 3), 3, "%d", i); + if (!ret) + goto del_group_4; + dev_attr_buffer_pool_count[i].attr.name = + (name_attrs_pool_count + i * 3); + dev_attr_buffer_pool_count[i].attr.mode = S_IRUSR; + dev_attr_buffer_pool_count[i].show = show_pool_count; + bman_dev_pool_count_attributes[i] = + &dev_attr_buffer_pool_count[i].attr; + } + bman_dev_pool_count_attributes[bman_pool_max] = NULL; + + bman_dev_pool_countent_grp.attrs = bman_dev_pool_count_attributes; + + ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_pool_countent_grp); + if (ret) + goto del_group_4; + + goto done; + +del_group_4: + for (i = 0; i < (bman_pool_max + 1); i++) + kfree(bman_dev_pool_count_attributes[i]); + kfree(bman_dev_pool_count_attributes); +del_group_3: + kfree(dev_attr_buffer_pool_count); +del_group_2: + kfree(name_attrs_pool_count); +del_group_1: + sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_ecr_grp); +del_group_0: + sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp); +done: + if (ret) + dev_err(&ofdev->dev, + "Cannot create dev attributes ret=%d\n", ret); + return ret; +}; + +static struct of_device_id of_fsl_bman_ids[] = { + { + .compatible = "fsl,bman", + }, + {} +}; +MODULE_DEVICE_TABLE(of, of_fsl_bman_ids); + +static struct platform_driver of_fsl_bman_driver = { + .driver = { + .owner = THIS_MODULE, + .name = DRV_NAME, + .of_match_table = of_fsl_bman_ids, + }, + .probe = of_fsl_bman_probe, + .remove = __devexit_p(of_fsl_bman_remove), +}; + +static int bman_ctrl_init(void) +{ + return platform_driver_register(&of_fsl_bman_driver); +} + +static void bman_ctrl_exit(void) +{ + platform_driver_unregister(&of_fsl_bman_driver); +} + +module_init(bman_ctrl_init); +module_exit(bman_ctrl_exit); + +#endif /* CONFIG_SYSFS */ diff --git a/drivers/staging/fsl_qbman/bman_debugfs.c b/drivers/staging/fsl_qbman/bman_debugfs.c new file mode 100644 index 0000000..2c3d771 --- /dev/null +++ b/drivers/staging/fsl_qbman/bman_debugfs.c @@ -0,0 +1,120 @@ +/* Copyright 2010-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include +#include +#include +#include + +static struct dentry *dfs_root; /* debugfs root directory */ + +/******************************************************************************* + * Query Buffer Pool State + ******************************************************************************/ +static int query_bp_state_show(struct seq_file *file, void *offset) +{ + int ret; + struct bm_pool_state state; + int i, j; + u32 mask; + + memset(&state, 0, sizeof(struct bm_pool_state)); + ret = bman_query_pools(&state); + if (ret) { + seq_printf(file, "Error %d\n", ret); + return 0; + } + seq_printf(file, "bp_id free_buffers_avail bp_depleted\n"); + for (i = 0; i < 2; i++) { + mask = 0x80000000; + for (j = 0; j < 32; j++) { + seq_printf(file, + " %-2u %-3s %-3s\n", + (i*32)+j, + (state.as.state.__state[i] & mask) ? "no" : "yes", + (state.ds.state.__state[i] & mask) ? "yes" : "no"); + mask >>= 1; + } + } + return 0; +} + +static int query_bp_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, query_bp_state_show, NULL); +} + +static const struct file_operations query_bp_state_fops = { + .owner = THIS_MODULE, + .open = query_bp_state_open, + .read = seq_read, + .release = single_release, +}; + +static int __init bman_debugfs_module_init(void) +{ + int ret = 0; + struct dentry *d; + + dfs_root = debugfs_create_dir("bman", NULL); + + if (dfs_root == NULL) { + ret = -ENOMEM; + pr_err("Cannot create bman debugfs dir\n"); + goto _return; + } + d = debugfs_create_file("query_bp_state", + S_IRUGO, + dfs_root, + NULL, + &query_bp_state_fops); + if (d == NULL) { + ret = -ENOMEM; + pr_err("Cannot create query_bp_state\n"); + goto _return; + } + return 0; + +_return: + if (dfs_root) + debugfs_remove_recursive(dfs_root); + return ret; +} + +static void __exit bman_debugfs_module_exit(void) +{ + debugfs_remove_recursive(dfs_root); +} + + +module_init(bman_debugfs_module_init); +module_exit(bman_debugfs_module_exit); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/staging/fsl_qbman/bman_driver.c b/drivers/staging/fsl_qbman/bman_driver.c new file mode 100644 index 0000000..403ad87 --- /dev/null +++ b/drivers/staging/fsl_qbman/bman_driver.c @@ -0,0 +1,397 @@ +/* Copyright 2008-2012 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "bman_private.h" + +/* + * Global variables of the max portal/pool number this bman version supported + */ +u16 bman_ip_rev; +EXPORT_SYMBOL(bman_ip_rev); +u16 bman_pool_max; +EXPORT_SYMBOL(bman_pool_max); +u16 bman_portal_max; + +/* After initialising cpus that own shared portal configs, we cache the + * resulting portals (ie. not just the configs) in this array. Then we + * initialise slave cpus that don't have their own portals, redirecting them to + * portals from this cache in a round-robin assignment. 
*/ +static struct bman_portal *shared_portals[NR_CPUS]; +static int num_shared_portals; +static int shared_portals_idx; + +static int __init fsl_bpool_init(struct device_node *node) +{ + int ret; + u32 *thresh, *bpid = (u32 *)of_get_property(node, "fsl,bpid", &ret); + if (!bpid || (ret != 4)) { + pr_err("Can't get %s property 'fsl,bpid'\n", node->full_name); + return -ENODEV; + } + thresh = (u32 *)of_get_property(node, "fsl,bpool-thresholds", &ret); + if (thresh) { + if (ret != 16) { + pr_err("Invalid %s property '%s'\n", + node->full_name, "fsl,bpool-thresholds"); + return -ENODEV; + } + } + if (thresh) { +#ifdef CONFIG_FSL_BMAN_CONFIG + ret = bm_pool_set(*bpid, thresh); + if (ret) + pr_err("No CCSR node for %s property '%s'\n", + node->full_name, "fsl,bpool-thresholds"); + return ret; +#else + pr_err("Ignoring %s property '%s', no CCSR support\n", + node->full_name, "fsl,bpool-thresholds"); +#endif + } + return 0; +} + +static int __init fsl_bpid_range_init(struct device_node *node) +{ + int ret; + u32 *range = (u32 *)of_get_property(node, "fsl,bpid-range", &ret); + if (!range) { + pr_err("No 'fsl,bpid-range' property in node %s\n", + node->full_name); + return -EINVAL; + } + if (ret != 8) { + pr_err("'fsl,bpid-range' is not a 2-cell range in node %s\n", + node->full_name); + return -EINVAL; + } + bman_release_bpid_range(range[0], range[1]); + pr_info("Bman: BPID allocator includes range %d:%d\n", + range[0], range[1]); + return 0; +} + +static struct bm_portal_config * __init parse_pcfg(struct device_node *node) +{ + struct bm_portal_config *pcfg; + const u32 *index; + int irq, ret; + + pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL); + if (!pcfg) { + pr_err("can't allocate portal config"); + return NULL; + } + + if (of_device_is_compatible(node, "fsl,bman-portal-1.0") || + of_device_is_compatible(node, "fsl,bman-portal-1.0.0")) { + bman_ip_rev = BMAN_REV10; + bman_pool_max = 64; + bman_portal_max = 10; + } else if (of_device_is_compatible(node, "fsl,bman-portal-2.0") || + of_device_is_compatible(node, "fsl,bman-portal-2.0.8")) { + bman_ip_rev = BMAN_REV20; + bman_pool_max = 8; + bman_portal_max = 3; + } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.0")) { + bman_ip_rev = BMAN_REV21; + bman_pool_max = 64; + bman_portal_max = 50; + } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.1")) { + bman_ip_rev = BMAN_REV21; + bman_pool_max = 64; + bman_portal_max = 25; + } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.2")) { + bman_ip_rev = BMAN_REV21; + bman_pool_max = 64; + bman_portal_max = 10; + } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.3")) { + bman_ip_rev = BMAN_REV21; + bman_pool_max = 64; + bman_portal_max = 18; + } + + ret = of_address_to_resource(node, DPA_PORTAL_CE, + &pcfg->addr_phys[DPA_PORTAL_CE]); + if (ret) { + pr_err("Can't get %s property 'reg::CE'\n", node->full_name); + goto err; + } + ret = of_address_to_resource(node, DPA_PORTAL_CI, + &pcfg->addr_phys[DPA_PORTAL_CI]); + if (ret) { + pr_err("Can't get %s property 'reg::CI'\n", node->full_name); + goto err; + } + + index = of_get_property(node, "cell-index", &ret); + if (!index || (ret != 4)) { + pr_err("Can't get %s property '%s'\n", node->full_name, + "cell-index"); + goto err; + } + if (*index >= bman_portal_max) + goto err; + + pcfg->public_cfg.cpu = -1; + + irq = irq_of_parse_and_map(node, 0); + if (irq == NO_IRQ) { + pr_err("Can't get %s property 'interrupts'\n", node->full_name); + goto err; + } + pcfg->public_cfg.irq = irq; + pcfg->public_cfg.index = *index; + 
bman_depletion_fill(&pcfg->public_cfg.mask); + + pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot( + pcfg->addr_phys[DPA_PORTAL_CE].start, + resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]), + 0); + pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot( + pcfg->addr_phys[DPA_PORTAL_CI].start, + resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]), + _PAGE_GUARDED | _PAGE_NO_CACHE); + return pcfg; +err: + kfree(pcfg); + return NULL; +} + +static struct bm_portal_config *get_pcfg(struct list_head *list) +{ + struct bm_portal_config *pcfg; + if (list_empty(list)) + return NULL; + pcfg = list_entry(list->prev, struct bm_portal_config, list); + list_del(&pcfg->list); + return pcfg; +} + +static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg) +{ + struct bman_portal *p; + struct cpumask oldmask = *tsk_cpus_allowed(current); + set_cpus_allowed_ptr(current, get_cpu_mask(pcfg->public_cfg.cpu)); + p = bman_create_affine_portal(pcfg); + if (p) { +#ifdef CONFIG_FSL_DPA_PIRQ_SLOW + bman_irqsource_add(BM_PIRQ_RCRI | BM_PIRQ_BSCN); +#endif + pr_info("Bman portal %sinitialised, cpu %d\n", + pcfg->public_cfg.is_shared ? "(shared) " : "", + pcfg->public_cfg.cpu); + } else + pr_crit("Bman portal failure on cpu %d\n", + pcfg->public_cfg.cpu); + set_cpus_allowed_ptr(current, &oldmask); + return p; +} + +static void init_slave(int cpu) +{ + struct bman_portal *p; + struct cpumask oldmask = *tsk_cpus_allowed(current); + set_cpus_allowed_ptr(current, get_cpu_mask(cpu)); + p = bman_create_affine_slave(shared_portals[shared_portals_idx++]); + if (!p) + pr_err("Bman slave portal failure on cpu %d\n", cpu); + else + pr_info("Bman portal %sinitialised, cpu %d\n", "(slave) ", cpu); + set_cpus_allowed_ptr(current, &oldmask); + if (shared_portals_idx >= num_shared_portals) + shared_portals_idx = 0; +} + +/* Bootarg "bportals=[...]" has the same syntax as "qportals=", and so the + * parsing is in dpa_sys.h. The syntax is a comma-separated list of indexes + * and/or ranges of indexes, with each being optionally prefixed by "s" to + * explicitly mark it or them for sharing. + * Eg; + * bportals=s0,1-3,s4 + * means that cpus 1,2,3 get "unshared" portals, cpus 0 and 4 get "shared" + * portals, and any remaining cpus share the portals that are assigned to cpus 0 + * or 4, selected in a round-robin fashion. (In this example, cpu 5 would share + * cpu 0's portal, cpu 6 would share cpu4's portal, and cpu 7 would share cpu + * 0's portal.) */ +static struct cpumask want_unshared __initdata; /* cpus requested without "s" */ +static struct cpumask want_shared __initdata; /* cpus requested with "s" */ + +static int __init parse_bportals(char *str) +{ + return parse_portals_bootarg(str, &want_shared, &want_unshared, + "bportals"); +} +__setup("bportals=", parse_bportals); + +/* Initialise the Bman driver. The meat of this function deals with portals. The + * following describes the flow of portal-handling, the code "steps" refer to + * this description; + * 1. Portal configs are parsed from the device-tree into 'unused_pcfgs', with + * ::cpu==-1. Regions and interrupts are mapped (but interrupts are not + * bound). + * 2. The "want_shared" and "want_unshared" lists (as filled by the + * "bportals=[...]" bootarg) are processed, allocating portals and assigning + * them to cpus, placing them in the relevant list and setting ::cpu as + * appropriate. If no "bportals" bootarg was present, the defaut is to try to + * assign portals to all online cpus at the time of driver initialisation. 
+ * Any failure to allocate portals (when parsing the "want" lists or when + * using default behaviour) will be silently tolerated (the "fixup" logic in + * step 3 will determine what happens in this case). + * 3. Do fixups relative to cpu_online_mask(). If no portals are marked for + * sharing and sharing is required (because not all cpus have been assigned + * portals), then one portal will marked for sharing. Conversely if no + * sharing is required, any portals marked for sharing will not be shared. It + * may be that sharing occurs when it wasn't expected, if portal allocation + * failed to honour all the requested assignments (including the default + * assignments if no bootarg is present). + * 4. Unshared portals are initialised on their respective cpus. + * 5. Shared portals are initialised on their respective cpus. + * 6. Each remaining cpu is initialised to slave to one of the shared portals, + * which are selected in a round-robin fashion. + */ +static __init int bman_init(void) +{ + struct cpumask slave_cpus; + struct cpumask unshared_cpus = *cpu_none_mask; + struct cpumask shared_cpus = *cpu_none_mask; + LIST_HEAD(unused_pcfgs); + LIST_HEAD(unshared_pcfgs); + LIST_HEAD(shared_pcfgs); + struct device_node *dn; + struct bm_portal_config *pcfg; + struct bman_portal *p; + int cpu, ret; + + /* Initialise the Bman (CCSR) device */ + for_each_compatible_node(dn, NULL, "fsl,bman") { + if (!bman_init_ccsr(dn)) + pr_info("Bman err interrupt handler present\n"); + else + pr_err("Bman CCSR setup failed\n"); + } + /* Initialise any declared buffer pools */ + for_each_compatible_node(dn, NULL, "fsl,bpool") { + ret = fsl_bpool_init(dn); + if (ret) + return ret; + } + /* Step 1. See comments at the beginning of the file. */ + for_each_compatible_node(dn, NULL, "fsl,bman-portal") { + if (!of_device_is_available(dn)) + continue; + pcfg = parse_pcfg(dn); + if (pcfg) + list_add_tail(&pcfg->list, &unused_pcfgs); + } + /* Step 2. */ + for_each_cpu(cpu, &want_shared) { + pcfg = get_pcfg(&unused_pcfgs); + if (!pcfg) + break; + pcfg->public_cfg.cpu = cpu; + list_add_tail(&pcfg->list, &shared_pcfgs); + cpumask_set_cpu(cpu, &shared_cpus); + } + for_each_cpu(cpu, &want_unshared) { + if (cpumask_test_cpu(cpu, &shared_cpus)) + continue; + pcfg = get_pcfg(&unused_pcfgs); + if (!pcfg) + break; + pcfg->public_cfg.cpu = cpu; + list_add_tail(&pcfg->list, &unshared_pcfgs); + cpumask_set_cpu(cpu, &unshared_cpus); + } + if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) { + /* Default, give an unshared portal to each online cpu */ + for_each_online_cpu(cpu) { + pcfg = get_pcfg(&unused_pcfgs); + if (!pcfg) + break; + pcfg->public_cfg.cpu = cpu; + list_add_tail(&pcfg->list, &unshared_pcfgs); + cpumask_set_cpu(cpu, &unshared_cpus); + } + } + /* Step 3. 
*/ + cpumask_andnot(&slave_cpus, cpu_online_mask, &shared_cpus); + cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus); + if (cpumask_empty(&slave_cpus)) { + /* No sharing required */ + if (!list_empty(&shared_pcfgs)) { + /* Migrate "shared" to "unshared" */ + cpumask_or(&unshared_cpus, &unshared_cpus, + &shared_cpus); + cpumask_clear(&shared_cpus); + list_splice_tail(&shared_pcfgs, &unshared_pcfgs); + INIT_LIST_HEAD(&shared_pcfgs); + } + } else { + /* Sharing required */ + if (list_empty(&shared_pcfgs)) { + /* Migrate one "unshared" to "shared" */ + pcfg = get_pcfg(&unshared_pcfgs); + if (!pcfg) { + pr_crit("No BMan portals available!\n"); + return 0; + } + cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus); + cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus); + list_add_tail(&pcfg->list, &shared_pcfgs); + } + } + /* Step 4. */ + list_for_each_entry(pcfg, &unshared_pcfgs, list) { + pcfg->public_cfg.is_shared = 0; + p = init_pcfg(pcfg); + } + /* Step 5. */ + list_for_each_entry(pcfg, &shared_pcfgs, list) { + pcfg->public_cfg.is_shared = 1; + p = init_pcfg(pcfg); + if (p) + shared_portals[num_shared_portals++] = p; + } + /* Step 6. */ + if (!cpumask_empty(&slave_cpus)) + for_each_cpu(cpu, &slave_cpus) + init_slave(cpu); + pr_info("Bman portals initialised\n"); + /* Initialise BPID allocation ranges */ + for_each_compatible_node(dn, NULL, "fsl,bpid-range") { + ret = fsl_bpid_range_init(dn); + if (ret) + return ret; + } + return 0; +} +subsys_initcall(bman_init); diff --git a/drivers/staging/fsl_qbman/bman_high.c b/drivers/staging/fsl_qbman/bman_high.c new file mode 100644 index 0000000..ecdfae7 --- /dev/null +++ b/drivers/staging/fsl_qbman/bman_high.c @@ -0,0 +1,1009 @@ +/* Copyright 2008-2012 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "bman_low.h" + +/* Compilation constants */ +#define RCR_THRESH 2 /* reread h/w CI when running out of space */ +#define IRQNAME "BMan portal %d" +#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */ + +struct bman_portal { + struct bm_portal p; + /* 2-element array. pools[0] is mask, pools[1] is snapshot. */ + struct bman_depletion *pools; + int thresh_set; + unsigned long irq_sources; + u32 slowpoll; /* only used when interrupts are off */ +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + struct bman_pool *rcri_owned; /* only 1 release WAIT_SYNC at a time */ +#endif +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + raw_spinlock_t sharing_lock; /* only used if is_shared */ + int is_shared; + struct bman_portal *sharing_redirect; +#endif + /* When the cpu-affine portal is activated, this is non-NULL */ + const struct bm_portal_config *config; + /* 64-entry hash-table of pool objects that are tracking depletion + * entry/exit (ie. BMAN_POOL_FLAG_DEPLETION). This isn't fast-path, so + * we're not fussy about cache-misses and so forth - whereas the above + * members should all fit in one cacheline. + * BTW, with 64 entries in the hash table and 64 buffer pools to track, + * you'll never guess the hash-function ... */ + struct bman_pool *cb[64]; + char irqname[MAX_IRQNAME]; +}; + +/* For an explanation of the locking, redirection, or affine-portal logic, + * please consult the Qman driver for details. This is the same, only simpler + * (no fiddly Qman-specific bits.) */ +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE +#define PORTAL_IRQ_LOCK(p, irqflags) \ + do { \ + if ((p)->is_shared) \ + raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \ + else \ + local_irq_save(irqflags); \ + } while (0) +#define PORTAL_IRQ_UNLOCK(p, irqflags) \ + do { \ + if ((p)->is_shared) \ + raw_spin_unlock_irqrestore(&(p)->sharing_lock, \ + irqflags); \ + else \ + local_irq_restore(irqflags); \ + } while (0) +#else +#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags) +#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags) +#endif + +static cpumask_t affine_mask; +static DEFINE_SPINLOCK(affine_mask_lock); +static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal); +static inline struct bman_portal *get_raw_affine_portal(void) +{ + return &get_cpu_var(bman_affine_portal); +} +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE +static inline struct bman_portal *get_affine_portal(void) +{ + struct bman_portal *p = get_raw_affine_portal(); + if (p->sharing_redirect) + return p->sharing_redirect; + return p; +} +#else +#define get_affine_portal() get_raw_affine_portal() +#endif +static inline void put_affine_portal(void) +{ + put_cpu_var(bman_affine_portal); +} +static inline struct bman_portal *get_poll_portal(void) +{ + return &__get_cpu_var(bman_affine_portal); +} +#define put_poll_portal() do { ; } while (0) + +/* GOTCHA: this object type refers to a pool, it isn't *the* pool. There may be + * more than one such object per Bman buffer pool, eg. if different users of the + * pool are operating via different portals. */ +struct bman_pool { + struct bman_pool_params params; + /* Used for hash-table admin when using depletion notifications. 
*/ + struct bman_portal *portal; + struct bman_pool *next; + /* stockpile state - NULL unless BMAN_POOL_FLAG_STOCKPILE is set */ + struct bm_buffer *sp; + unsigned int sp_fill; +#ifdef CONFIG_FSL_DPA_CHECKING + atomic_t in_use; +#endif +}; + +/* (De)Registration of depletion notification callbacks */ +static void depletion_link(struct bman_portal *portal, struct bman_pool *pool) +{ + __maybe_unused unsigned long irqflags; + pool->portal = portal; + PORTAL_IRQ_LOCK(portal, irqflags); + pool->next = portal->cb[pool->params.bpid]; + portal->cb[pool->params.bpid] = pool; + if (!pool->next) + /* First object for that bpid on this portal, enable the BSCN + * mask bit. */ + bm_isr_bscn_mask(&portal->p, pool->params.bpid, 1); + PORTAL_IRQ_UNLOCK(portal, irqflags); +} +static void depletion_unlink(struct bman_pool *pool) +{ + struct bman_pool *it, *last = NULL; + struct bman_pool **base = &pool->portal->cb[pool->params.bpid]; + __maybe_unused unsigned long irqflags; + PORTAL_IRQ_LOCK(pool->portal, irqflags); + it = *base; /* <-- gotcha, don't do this prior to the irq_save */ + while (it != pool) { + last = it; + it = it->next; + } + if (!last) + *base = pool->next; + else + last->next = pool->next; + if (!last && !pool->next) { + /* Last object for that bpid on this portal, disable the BSCN + * mask bit. */ + bm_isr_bscn_mask(&pool->portal->p, pool->params.bpid, 0); + /* And "forget" that we last saw this pool as depleted */ + bman_depletion_unset(&pool->portal->pools[1], + pool->params.bpid); + } + PORTAL_IRQ_UNLOCK(pool->portal, irqflags); +} + +/* In the case that the application's core loop calls qman_poll() and + * bman_poll(), we ought to balance how often we incur the overheads of the + * slow-path poll. We'll use two decrementer sources. The idle decrementer + * constant is used when the last slow-poll detected no work to do, and the busy + * decrementer constant when the last slow-poll had work to do. */ +#define SLOW_POLL_IDLE 1000 +#define SLOW_POLL_BUSY 10 +static u32 __poll_portal_slow(struct bman_portal *p, u32 is); + +/* Portal interrupt handler */ +static irqreturn_t portal_isr(__always_unused int irq, void *ptr) +{ + struct bman_portal *p = ptr; + u32 clear = p->irq_sources; + u32 is = bm_isr_status_read(&p->p) & p->irq_sources; + clear |= __poll_portal_slow(p, is); + bm_isr_status_clear(&p->p, clear); + return IRQ_HANDLED; +} + +struct bman_portal *bman_create_affine_portal( + const struct bm_portal_config *config) +{ + struct bman_portal *portal = get_raw_affine_portal(); + struct bm_portal *__p = &portal->p; + const struct bman_depletion *pools = &config->public_cfg.mask; + int ret; + u8 bpid = 0; + + /* A criteria for calling this function (from bman_driver.c) is that + * we're already affine to the cpu and won't schedule onto another cpu. + * This means we can put_affine_portal() and yet continue to use + * "portal", which in turn means aspects of this routine can sleep. */ + put_affine_portal(); + + /* prep the low-level portal struct with the mapped addresses from the + * config, everything that follows depends on it and "config" is more + * for (de)reference... 
*/ + __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE]; + __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI]; + if (bm_rcr_init(__p, bm_rcr_pvb, bm_rcr_cce)) { + pr_err("Bman RCR initialisation failed\n"); + goto fail_rcr; + } + if (bm_mc_init(__p)) { + pr_err("Bman MC initialisation failed\n"); + goto fail_mc; + } + if (bm_isr_init(__p)) { + pr_err("Bman ISR initialisation failed\n"); + goto fail_isr; + } + portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL); + if (!portal->pools) + goto fail_pools; + portal->pools[0] = *pools; + bman_depletion_init(portal->pools + 1); + while (bpid < bman_pool_max) { + /* Default to all BPIDs disabled, we enable as required at + * run-time. */ + bm_isr_bscn_mask(__p, bpid, 0); + bpid++; + } + portal->slowpoll = 0; +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + portal->rcri_owned = NULL; +#endif +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + raw_spin_lock_init(&portal->sharing_lock); + portal->is_shared = config->public_cfg.is_shared; + portal->sharing_redirect = NULL; +#endif + memset(&portal->cb, 0, sizeof(portal->cb)); + /* Write-to-clear any stale interrupt status bits */ + bm_isr_disable_write(__p, 0xffffffff); + portal->irq_sources = 0; + bm_isr_enable_write(__p, portal->irq_sources); + bm_isr_status_clear(__p, 0xffffffff); + snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu); + if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname, + portal)) { + pr_err("request_irq() failed\n"); + goto fail_irq; + } + if ((config->public_cfg.cpu != -1) && + irq_can_set_affinity(config->public_cfg.irq) && + irq_set_affinity(config->public_cfg.irq, + cpumask_of(config->public_cfg.cpu))) { + pr_err("irq_set_affinity() failed\n"); + goto fail_affinity; + } + /* Need RCR to be empty before continuing */ + ret = bm_rcr_get_fill(__p); + if (ret) { + pr_err("Bman RCR unclean\n"); + goto fail_rcr_empty; + } + /* Success */ + portal->config = config; + spin_lock(&affine_mask_lock); + cpumask_set_cpu(config->public_cfg.cpu, &affine_mask); + spin_unlock(&affine_mask_lock); + bm_isr_disable_write(__p, 0); + bm_isr_uninhibit(__p); + return portal; +fail_rcr_empty: +fail_affinity: + free_irq(config->public_cfg.irq, portal); +fail_irq: + if (portal->pools) + kfree(portal->pools); +fail_pools: + bm_isr_finish(__p); +fail_isr: + bm_mc_finish(__p); +fail_mc: + bm_rcr_finish(__p); +fail_rcr: + return NULL; +} + +struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect) +{ +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + struct bman_portal *p = get_raw_affine_portal(); + BUG_ON(p->config); + BUG_ON(p->is_shared); + BUG_ON(!redirect->config->public_cfg.is_shared); + p->irq_sources = 0; + p->sharing_redirect = redirect; + put_affine_portal(); + return p; +#else + BUG(); + return NULL; +#endif +} + +const struct bm_portal_config *bman_destroy_affine_portal(void) +{ + struct bman_portal *bm = get_raw_affine_portal(); + const struct bm_portal_config *pcfg; +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (bm->sharing_redirect) { + bm->sharing_redirect = NULL; + put_affine_portal(); + return NULL; + } + bm->is_shared = 0; +#endif + pcfg = bm->config; + bm_rcr_cce_update(&bm->p); + bm_rcr_cce_update(&bm->p); + free_irq(pcfg->public_cfg.irq, bm); + kfree(bm->pools); + bm_isr_finish(&bm->p); + bm_mc_finish(&bm->p); + bm_rcr_finish(&bm->p); + bm->config = NULL; + spin_lock(&affine_mask_lock); + cpumask_clear_cpu(pcfg->public_cfg.cpu, &affine_mask); + spin_unlock(&affine_mask_lock); + put_affine_portal(); + return pcfg; +} + +/* When release logic waits on available 
RCR space, we need a global waitqueue + * in the case of "affine" use (as the waits wake on different cpus which means + * different portals - so we can't wait on any per-portal waitqueue). */ +static DECLARE_WAIT_QUEUE_HEAD(affine_queue); + +static u32 __poll_portal_slow(struct bman_portal *p, u32 is) +{ + struct bman_depletion tmp; + u32 ret = is; + + /* There is a gotcha to be aware of. If we do the query before clearing + * the status register, we may miss state changes that occur between the + * two. If we write to clear the status register before the query, the + * cache-enabled query command may overtake the status register write + * unless we use a heavyweight sync (which we don't want). Instead, we + * write-to-clear the status register then *read it back* before doing + * the query, hence the odd while loop with the 'is' accumulation. */ + if (is & BM_PIRQ_BSCN) { + struct bm_mc_result *mcr; + __maybe_unused unsigned long irqflags; + unsigned int i, j; + u32 __is; + bm_isr_status_clear(&p->p, BM_PIRQ_BSCN); + while ((__is = bm_isr_status_read(&p->p)) & BM_PIRQ_BSCN) { + is |= __is; + bm_isr_status_clear(&p->p, BM_PIRQ_BSCN); + } + is &= ~BM_PIRQ_BSCN; + PORTAL_IRQ_LOCK(p, irqflags); + bm_mc_start(&p->p); + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY); + while (!(mcr = bm_mc_result(&p->p))) + cpu_relax(); + tmp = mcr->query.ds.state; + PORTAL_IRQ_UNLOCK(p, irqflags); + for (i = 0; i < 2; i++) { + int idx = i * 32; + /* tmp is a mask of currently-depleted pools. + * pools[0] is mask of those we care about. + * pools[1] is our previous view (we only want to + * be told about changes). */ + tmp.__state[i] &= p->pools[0].__state[i]; + if (tmp.__state[i] == p->pools[1].__state[i]) + /* fast-path, nothing to see, move along */ + continue; + for (j = 0; j <= 31; j++, idx++) { + struct bman_pool *pool = p->cb[idx]; + int b4 = bman_depletion_get(&p->pools[1], idx); + int af = bman_depletion_get(&tmp, idx); + if (b4 == af) + continue; + while (pool) { + pool->params.cb(p, pool, + pool->params.cb_ctx, af); + pool = pool->next; + } + } + } + p->pools[1] = tmp; + } + + if (is & BM_PIRQ_RCRI) { + __maybe_unused unsigned long irqflags; + PORTAL_IRQ_LOCK(p, irqflags); + bm_rcr_cce_update(&p->p); +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + /* If waiting for sync, we only cancel the interrupt threshold + * when the ring utilisation hits zero. 
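+ * Ie. rcri_owned is only cleared, and the waiter blocked on affine_queue
+ * only satisfied, once bm_rcr_get_fill() reports an empty ring.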
*/ + if (p->rcri_owned) { + if (!bm_rcr_get_fill(&p->p)) { + p->rcri_owned = NULL; + bm_rcr_set_ithresh(&p->p, 0); + } + } else +#endif + bm_rcr_set_ithresh(&p->p, 0); + PORTAL_IRQ_UNLOCK(p, irqflags); + wake_up(&affine_queue); + bm_isr_status_clear(&p->p, BM_PIRQ_RCRI); + is &= ~BM_PIRQ_RCRI; + } + + /* There should be no status register bits left undefined */ + DPA_ASSERT(!is); + return ret; +} + +const struct bman_portal_config *bman_get_portal_config(void) +{ + struct bman_portal *p = get_affine_portal(); + const struct bman_portal_config *ret = &p->config->public_cfg; + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(bman_get_portal_config); + +u32 bman_irqsource_get(void) +{ + struct bman_portal *p = get_raw_affine_portal(); + u32 ret = p->irq_sources & BM_PIRQ_VISIBLE; + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(bman_irqsource_get); + +int bman_irqsource_add(__maybe_unused u32 bits) +{ + struct bman_portal *p = get_raw_affine_portal(); + int ret = 0; +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (p->sharing_redirect) + ret = -EINVAL; + else +#endif + { + __maybe_unused unsigned long irqflags; + PORTAL_IRQ_LOCK(p, irqflags); + set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources); + bm_isr_enable_write(&p->p, p->irq_sources); + PORTAL_IRQ_UNLOCK(p, irqflags); + } + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(bman_irqsource_add); + +int bman_irqsource_remove(u32 bits) +{ + struct bman_portal *p = get_raw_affine_portal(); + __maybe_unused unsigned long irqflags; + u32 ier; +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (p->sharing_redirect) { + put_affine_portal(); + return -EINVAL; + } +#endif + /* Our interrupt handler only processes+clears status register bits that + * are in p->irq_sources. As we're trimming that mask, if one of them + * were to assert in the status register just before we remove it from + * the enable register, there would be an interrupt-storm when we + * release the IRQ lock. So we wait for the enable register update to + * take effect in h/w (by reading it back) and then clear all other bits + * in the status register. Ie. we clear them from ISR once it's certain + * IER won't allow them to reassert. */ + PORTAL_IRQ_LOCK(p, irqflags); + bits &= BM_PIRQ_VISIBLE; + clear_bits(bits, &p->irq_sources); + bm_isr_enable_write(&p->p, p->irq_sources); + ier = bm_isr_enable_read(&p->p); + /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a + * data-dependency, ie. to protect against re-ordering. 
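+ * Eg. removing BM_PIRQ_BSCN boils down to:
+ *   clear_bits(BM_PIRQ_BSCN, &p->irq_sources);
+ *   bm_isr_enable_write(&p->p, p->irq_sources);
+ *   ier = bm_isr_enable_read(&p->p);   <- read-back, IER update is in h/w
+ *   bm_isr_status_clear(&p->p, ~ier);  <- clear bits IER can't re-assert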
*/ + bm_isr_status_clear(&p->p, ~ier); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return 0; +} +EXPORT_SYMBOL(bman_irqsource_remove); + +const cpumask_t *bman_affine_cpus(void) +{ + return &affine_mask; +} +EXPORT_SYMBOL(bman_affine_cpus); + +u32 bman_poll_slow(void) +{ + struct bman_portal *p = get_poll_portal(); + u32 ret; +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (unlikely(p->sharing_redirect)) + ret = (u32)-1; + else +#endif + { + u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources; + ret = __poll_portal_slow(p, is); + bm_isr_status_clear(&p->p, ret); + } + put_poll_portal(); + return ret; +} +EXPORT_SYMBOL(bman_poll_slow); + +/* Legacy wrapper */ +void bman_poll(void) +{ + struct bman_portal *p = get_poll_portal(); +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (unlikely(p->sharing_redirect)) + goto done; +#endif + if (!(p->slowpoll--)) { + u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources; + u32 active = __poll_portal_slow(p, is); + if (active) + p->slowpoll = SLOW_POLL_BUSY; + else + p->slowpoll = SLOW_POLL_IDLE; + } +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE +done: +#endif + put_poll_portal(); +} +EXPORT_SYMBOL(bman_poll); + +static const u32 zero_thresholds[4] = {0, 0, 0, 0}; + +struct bman_pool *bman_new_pool(const struct bman_pool_params *params) +{ + struct bman_pool *pool = NULL; + u32 bpid; + + if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) { + int ret = bman_alloc_bpid(&bpid); + if (ret) + return NULL; + } else { + if (params->bpid >= bman_pool_max) + return NULL; + bpid = params->bpid; + } +#ifdef CONFIG_FSL_BMAN_CONFIG + if (params->flags & BMAN_POOL_FLAG_THRESH) { + int ret = bm_pool_set(bpid, params->thresholds); + if (ret) + goto err; + } +#else + if (params->flags & BMAN_POOL_FLAG_THRESH) + goto err; +#endif + pool = kmalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) + goto err; + pool->sp = NULL; + pool->sp_fill = 0; + pool->params = *params; +#ifdef CONFIG_FSL_DPA_CHECKING + atomic_set(&pool->in_use, 1); +#endif + if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) + pool->params.bpid = bpid; + if (params->flags & BMAN_POOL_FLAG_STOCKPILE) { + pool->sp = kmalloc(sizeof(struct bm_buffer) * BMAN_STOCKPILE_SZ, + GFP_KERNEL); + if (!pool->sp) + goto err; + } + if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) { + struct bman_portal *p = get_affine_portal(); + if (!p->pools || !bman_depletion_get(&p->pools[0], bpid)) { + pr_err("Depletion events disabled for bpid %d\n", bpid); + goto err; + } + depletion_link(p, pool); + put_affine_portal(); + } + return pool; +err: +#ifdef CONFIG_FSL_BMAN_CONFIG + if (params->flags & BMAN_POOL_FLAG_THRESH) + bm_pool_set(bpid, zero_thresholds); +#endif + if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) + bman_release_bpid(bpid); + if (pool) { + if (pool->sp) + kfree(pool->sp); + kfree(pool); + } + return NULL; +} +EXPORT_SYMBOL(bman_new_pool); + +void bman_free_pool(struct bman_pool *pool) +{ +#ifdef CONFIG_FSL_BMAN_CONFIG + if (pool->params.flags & BMAN_POOL_FLAG_THRESH) + bm_pool_set(pool->params.bpid, zero_thresholds); +#endif + if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) + depletion_unlink(pool); + if (pool->params.flags & BMAN_POOL_FLAG_STOCKPILE) { + if (pool->sp_fill) + pr_err("Stockpile not flushed, has %u in bpid %u.\n", + pool->sp_fill, pool->params.bpid); + kfree(pool->sp); + pool->sp = NULL; + pool->params.flags ^= BMAN_POOL_FLAG_STOCKPILE; + } + if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID) { + /* When releasing a BPID to the dynamic allocator, that pool + * must be *empty*. 
This code makes it so by dropping everything + * into the bit-bucket. This ignores whether or not it was a + * mistake (or a leak) on the caller's part not to drain the + * pool beforehand. */ + struct bm_buffer bufs[8]; + int ret = 0; + do { + /* Acquire is all-or-nothing, so we drain in 8s, then in + * 1s for the remainder. */ + if (ret != 1) + ret = bman_acquire(pool, bufs, 8, 0); + if (ret < 8) + ret = bman_acquire(pool, bufs, 1, 0); + } while (ret > 0); + bman_release_bpid(pool->params.bpid); + } + kfree(pool); +} +EXPORT_SYMBOL(bman_free_pool); + +const struct bman_pool_params *bman_get_params(const struct bman_pool *pool) +{ + return &pool->params; +} +EXPORT_SYMBOL(bman_get_params); + +static noinline void update_rcr_ci(struct bman_portal *p, u8 avail) +{ + if (avail) + bm_rcr_cce_prefetch(&p->p); + else + bm_rcr_cce_update(&p->p); +} + +int bman_rcr_is_empty(void) +{ + __maybe_unused unsigned long irqflags; + struct bman_portal *p = get_affine_portal(); + u8 avail; + + PORTAL_IRQ_LOCK(p, irqflags); + update_rcr_ci(p, 0); + avail = bm_rcr_get_fill(&p->p); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return (avail == 0); +} +EXPORT_SYMBOL(bman_rcr_is_empty); + +static inline struct bm_rcr_entry *try_rel_start(struct bman_portal **p, +#ifdef CONFIG_FSL_DPA_CAN_WAIT + __maybe_unused struct bman_pool *pool, +#endif + __maybe_unused unsigned long *irqflags, + __maybe_unused u32 flags) +{ + struct bm_rcr_entry *r; + u8 avail; + + *p = get_affine_portal(); + PORTAL_IRQ_LOCK(*p, (*irqflags)); +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && + (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) { + if ((*p)->rcri_owned) { + PORTAL_IRQ_UNLOCK(*p, (*irqflags)); + put_affine_portal(); + return NULL; + } + (*p)->rcri_owned = pool; + } +#endif + avail = bm_rcr_get_avail(&(*p)->p); + if (avail < 2) + update_rcr_ci(*p, avail); + r = bm_rcr_start(&(*p)->p); + if (unlikely(!r)) { +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && + (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) + (*p)->rcri_owned = NULL; +#endif + PORTAL_IRQ_UNLOCK(*p, (*irqflags)); + put_affine_portal(); + } + return r; +} + +#ifdef CONFIG_FSL_DPA_CAN_WAIT +static noinline struct bm_rcr_entry *__wait_rel_start(struct bman_portal **p, + struct bman_pool *pool, + __maybe_unused unsigned long *irqflags, + u32 flags) +{ + struct bm_rcr_entry *rcr = try_rel_start(p, pool, irqflags, flags); + if (!rcr) + bm_rcr_set_ithresh(&(*p)->p, 1); + return rcr; +} + +static noinline struct bm_rcr_entry *wait_rel_start(struct bman_portal **p, + struct bman_pool *pool, + __maybe_unused unsigned long *irqflags, + u32 flags) +{ + struct bm_rcr_entry *rcr; +#ifndef CONFIG_FSL_DPA_CAN_WAIT_SYNC + pool = NULL; +#endif + if (flags & BMAN_RELEASE_FLAG_WAIT_INT) + wait_event_interruptible(affine_queue, + (rcr = __wait_rel_start(p, pool, irqflags, flags))); + else + wait_event(affine_queue, + (rcr = __wait_rel_start(p, pool, irqflags, flags))); + return rcr; +} +#endif + +/* to facilitate better copying of bufs into the ring without either (a) copying + * noise into the first byte (prematurely triggering the command), nor (b) being + * very inefficient by copying small fields using read-modify-write */ +struct overlay_bm_buffer { + u32 first; + u32 second; +}; + +static inline int __bman_release(struct bman_pool *pool, + const struct bm_buffer *bufs, u8 num, u32 flags) +{ + struct bman_portal *p; + struct bm_rcr_entry *r; + struct overlay_bm_buffer *o_dest; + struct overlay_bm_buffer 
*o_src = (struct overlay_bm_buffer *)&bufs[0]; + __maybe_unused unsigned long irqflags; + u32 i = num - 1; + +#ifdef CONFIG_FSL_DPA_CAN_WAIT + if (flags & BMAN_RELEASE_FLAG_WAIT) + r = wait_rel_start(&p, pool, &irqflags, flags); + else + r = try_rel_start(&p, pool, &irqflags, flags); +#else + r = try_rel_start(&p, &irqflags, flags); +#endif + if (!r) + return -EBUSY; + /* We can copy all but the first entry, as this can trigger badness + * with the valid-bit. Use the overlay to mask the verb byte. */ + o_dest = (struct overlay_bm_buffer *)&r->bufs[0]; + o_dest->first = (o_src->first & 0x0000ffff) | + (((u32)pool->params.bpid << 16) & 0x00ff0000); + o_dest->second = o_src->second; + if (i) + copy_words(&r->bufs[1], &bufs[1], i * sizeof(bufs[0])); + bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE | + (num & BM_RCR_VERB_BUFCOUNT_MASK)); +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + /* if we wish to sync we need to set the threshold after h/w sees the + * new ring entry. As we're mixing cache-enabled and cache-inhibited + * accesses, this requires a heavy-weight sync. */ + if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && + (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) { + hwsync(); + bm_rcr_set_ithresh(&p->p, 1); + } +#endif + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && + (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) { + if (flags & BMAN_RELEASE_FLAG_WAIT_INT) + wait_event_interruptible(affine_queue, + (p->rcri_owned != pool)); + else + wait_event(affine_queue, (p->rcri_owned != pool)); + } +#endif + return 0; +} + +int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num, + u32 flags) +{ + int ret = 0; +#ifdef CONFIG_FSL_DPA_CHECKING + if (!num || (num > 8)) + return -EINVAL; + if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE) + return -EINVAL; +#endif + /* Without stockpile, this API is a pass-through to the h/w operation */ + if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE)) + return __bman_release(pool, bufs, num, flags); +#ifdef CONFIG_FSL_DPA_CHECKING + if (!atomic_dec_and_test(&pool->in_use)) { + pr_crit("Parallel attempts to enter bman_released() detected."); + panic("only one instance of bman_released/acquired allowed"); + } +#endif + /* This needs some explanation. Adding the given buffers may take the + * stockpile over the threshold, but in fact the stockpile may already + * *be* over the threshold if a previous release-to-hw attempt had + * failed. So we have 3 cases to cover; + * 1. we add to the stockpile and don't hit the threshold, + * 2. we add to the stockpile, hit the threshold and release-to-hw, + * 3. we have to release-to-hw before adding to the stockpile + * (not enough room in the stockpile for case 2). + * Our constraints on thresholds guarantee that in case 3, there must be + * at least 8 bufs already in the stockpile, so all release-to-hw ops + * are for 8 bufs. Despite all this, the API must indicate whether the + * given buffers were taken off the caller's hands, irrespective of + * whether a release-to-hw was attempted. 
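+ * Worked example, using the stockpile constants from bman_private.h
+ * (BMAN_STOCKPILE_SZ=16, BMAN_STOCKPILE_HIGH=14):
+ *   sp_fill=4,  num=8: case 1, buffers stockpiled (fill 12), no h/w op;
+ *   sp_fill=10, num=5: case 2, buffers stockpiled (fill 15), then 8 released
+ *                      to h/w, leaving fill 7;
+ *   sp_fill=14, num=8: case 3, no room, so 8 released to h/w first (fill 6),
+ *                      the new buffers then stockpiled (fill 14) and another
+ *                      8 released, leaving fill 6.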
*/ + while (num) { + /* Add buffers to stockpile if they fit */ + if ((pool->sp_fill + num) < BMAN_STOCKPILE_SZ) { + copy_words(pool->sp + pool->sp_fill, bufs, + sizeof(struct bm_buffer) * num); + pool->sp_fill += num; + num = 0; /* --> will return success no matter what */ + } + /* Do hw op if hitting the high-water threshold */ + if ((pool->sp_fill + num) >= BMAN_STOCKPILE_HIGH) { + ret = __bman_release(pool, + pool->sp + (pool->sp_fill - 8), 8, flags); + if (ret) { + ret = (num ? ret : 0); + goto release_done; + } + pool->sp_fill -= 8; + } + } +release_done: +#ifdef CONFIG_FSL_DPA_CHECKING + atomic_inc(&pool->in_use); +#endif + return ret; +} +EXPORT_SYMBOL(bman_release); + +static inline int __bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, + u8 num) +{ + struct bman_portal *p = get_affine_portal(); + struct bm_mc_command *mcc; + struct bm_mc_result *mcr; + __maybe_unused unsigned long irqflags; + int ret; + + PORTAL_IRQ_LOCK(p, irqflags); + mcc = bm_mc_start(&p->p); + mcc->acquire.bpid = pool->params.bpid; + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | + (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT)); + while (!(mcr = bm_mc_result(&p->p))) + cpu_relax(); + ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT; + if (bufs) + copy_words(&bufs[0], &mcr->acquire.bufs[0], + num * sizeof(bufs[0])); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + if (ret != num) + ret = -ENOMEM; + return ret; +} + +int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num, + u32 flags) +{ + int ret = 0; +#ifdef CONFIG_FSL_DPA_CHECKING + if (!num || (num > 8)) + return -EINVAL; + if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE) + return -EINVAL; +#endif + /* Without stockpile, this API is a pass-through to the h/w operation */ + if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE)) + return __bman_acquire(pool, bufs, num); +#ifdef CONFIG_FSL_DPA_CHECKING + if (!atomic_dec_and_test(&pool->in_use)) { + pr_crit("Parallel attempts to enter bman_acquire() detected."); + panic("only one instance of bman_released/acquired allowed"); + } +#endif + /* Only need a h/w op if we'll hit the low-water thresh */ + if (!(flags & BMAN_ACQUIRE_FLAG_STOCKPILE) && + (pool->sp_fill <= (BMAN_STOCKPILE_LOW + num))) { + /* refill stockpile with max amount, but if max amount + * isn't available, try amount the user wants */ + int bufcount = 8; + ret = __bman_acquire(pool, pool->sp + pool->sp_fill, bufcount); + if (ret < 0 && bufcount != num) { + bufcount = num; + /* Maybe buffer pool has less than 8 */ + ret = __bman_acquire(pool, pool->sp + pool->sp_fill, + bufcount); + } + if (ret < 0) + goto hw_starved; + DPA_ASSERT(ret == bufcount); + pool->sp_fill += bufcount; + } else { +hw_starved: + if (pool->sp_fill < num) { + ret = -ENOMEM; + goto acquire_done; + } + } + copy_words(bufs, pool->sp + (pool->sp_fill - num), + sizeof(struct bm_buffer) * num); + pool->sp_fill -= num; + ret = num; +acquire_done: +#ifdef CONFIG_FSL_DPA_CHECKING + atomic_inc(&pool->in_use); +#endif + return ret; +} +EXPORT_SYMBOL(bman_acquire); + +int bman_flush_stockpile(struct bman_pool *pool, u32 flags) +{ + u8 num; + int ret; + + while (pool->sp_fill) { + num = ((pool->sp_fill > 8) ? 
8 : pool->sp_fill); + ret = __bman_release(pool, pool->sp + (pool->sp_fill - num), + num, flags); + if (ret) + return ret; + pool->sp_fill -= num; + } + return 0; +} +EXPORT_SYMBOL(bman_flush_stockpile); + +int bman_query_pools(struct bm_pool_state *state) +{ + struct bman_portal *p = get_affine_portal(); + struct bm_mc_result *mcr; + __maybe_unused unsigned long irqflags; + + PORTAL_IRQ_LOCK(p, irqflags); + bm_mc_start(&p->p); + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY); + while (!(mcr = bm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) == BM_MCR_VERB_CMD_QUERY); + *state = mcr->query; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return 0; +} +EXPORT_SYMBOL(bman_query_pools); + +#ifdef CONFIG_FSL_BMAN_CONFIG +u32 bman_query_free_buffers(struct bman_pool *pool) +{ + return bm_pool_free_buffers(pool->params.bpid); +} +EXPORT_SYMBOL(bman_query_free_buffers); + +int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds) +{ + u32 bpid; + + bpid = bman_get_params(pool)->bpid; + + return bm_pool_set(bpid, thresholds); +} +EXPORT_SYMBOL(bman_update_pool_thresholds); +#endif diff --git a/drivers/staging/fsl_qbman/bman_low.h b/drivers/staging/fsl_qbman/bman_low.h new file mode 100644 index 0000000..262bae7 --- /dev/null +++ b/drivers/staging/fsl_qbman/bman_low.h @@ -0,0 +1,494 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "bman_private.h" + +/***************************/ +/* Portal register assists */ +/***************************/ + +/* Cache-inhibited register offsets */ +#define REG_RCR_PI_CINH 0x0000 +#define REG_RCR_CI_CINH 0x0004 +#define REG_RCR_ITR 0x0008 +#define REG_CFG 0x0100 +#define REG_SCN(n) (0x0200 + ((n) << 2)) +#define REG_ISR 0x0e00 + +/* Cache-enabled register offsets */ +#define CL_CR 0x0000 +#define CL_RR0 0x0100 +#define CL_RR1 0x0140 +#define CL_RCR 0x1000 +#define CL_RCR_PI_CENA 0x3000 +#define CL_RCR_CI_CENA 0x3100 + +/* BTW, the drivers (and h/w programming model) already obtain the required + * synchronisation for portal accesses via lwsync(), hwsync(), and + * data-dependencies. Use of barrier()s or other order-preserving primitives + * simply degrade performance. Hence the use of the __raw_*() interfaces, which + * simply ensure that the compiler treats the portal registers as volatile (ie. + * non-coherent). */ + +/* Cache-inhibited register access. */ +#define __bm_in(bm, o) __raw_readl((bm)->addr_ci + (o)) +#define __bm_out(bm, o, val) __raw_writel((val), (bm)->addr_ci + (o)) +#define bm_in(reg) __bm_in(&portal->addr, REG_##reg) +#define bm_out(reg, val) __bm_out(&portal->addr, REG_##reg, val) + +/* Cache-enabled (index) register access */ +#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->addr_ce + (o)) +#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->addr_ce + (o)) +#define __bm_cl_in(bm, o) __raw_readl((bm)->addr_ce + (o)) +#define __bm_cl_out(bm, o, val) \ + do { \ + u32 *__tmpclout = (bm)->addr_ce + (o); \ + __raw_writel((val), __tmpclout); \ + dcbf(__tmpclout); \ + } while (0) +#define __bm_cl_invalidate(bm, o) dcbi((bm)->addr_ce + (o)) +#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, CL_##reg##_CENA) +#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, CL_##reg##_CENA) +#define bm_cl_in(reg) __bm_cl_in(&portal->addr, CL_##reg##_CENA) +#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, CL_##reg##_CENA, val) +#define bm_cl_invalidate(reg) __bm_cl_invalidate(&portal->addr, CL_##reg##_CENA) + +/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf + * analysis, look at using the "extra" bit in the ring index registers to avoid + * cyclic issues. */ +static inline u8 cyc_diff(u8 ringsize, u8 first, u8 last) +{ + /* 'first' is included, 'last' is excluded */ + if (first <= last) + return last - first; + return ringsize + last - first; +} + +/* Portal modes. + * Enum types; + * pmode == production mode + * cmode == consumption mode, + * Enum values use 3 letter codes. 
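+ * (eg. bm_rcr_pvb is production mode using the in-band valid-bit, and
+ * bm_rcr_cce is consumption mode via the cache-enabled CI register.)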
First letter matches the portal mode, + * remaining two letters indicate; + * ci == cache-inhibited portal register + * ce == cache-enabled portal register + * vb == in-band valid-bit (cache-enabled) + */ +enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */ + bm_rcr_pci = 0, /* PI index, cache-inhibited */ + bm_rcr_pce = 1, /* PI index, cache-enabled */ + bm_rcr_pvb = 2 /* valid-bit */ +}; +enum bm_rcr_cmode { /* s/w-only */ + bm_rcr_cci, /* CI index, cache-inhibited */ + bm_rcr_cce /* CI index, cache-enabled */ +}; + + +/* ------------------------- */ +/* --- Portal structures --- */ + +#define BM_RCR_SIZE 8 + +struct bm_rcr { + struct bm_rcr_entry *ring, *cursor; + u8 ci, available, ithresh, vbit; +#ifdef CONFIG_FSL_DPA_CHECKING + u32 busy; + enum bm_rcr_pmode pmode; + enum bm_rcr_cmode cmode; +#endif +}; + +struct bm_mc { + struct bm_mc_command *cr; + struct bm_mc_result *rr; + u8 rridx, vbit; +#ifdef CONFIG_FSL_DPA_CHECKING + enum { + /* Can only be _mc_start()ed */ + mc_idle, + /* Can only be _mc_commit()ed or _mc_abort()ed */ + mc_user, + /* Can only be _mc_retry()ed */ + mc_hw + } state; +#endif +}; + +struct bm_addr { + void __iomem *addr_ce; /* cache-enabled */ + void __iomem *addr_ci; /* cache-inhibited */ +}; + +struct bm_portal { + struct bm_addr addr; + struct bm_rcr rcr; + struct bm_mc mc; + struct bm_portal_config config; +} ____cacheline_aligned; + + +/* --------------- */ +/* --- RCR API --- */ + +/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ +#define RCR_CARRYCLEAR(p) \ + (void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6))) + +/* Bit-wise logic to convert a ring pointer to a ring index */ +static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e) +{ + return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1); +} + +/* Increment the 'cursor' ring pointer, taking 'vbit' into account */ +static inline void RCR_INC(struct bm_rcr *rcr) +{ + /* NB: this is odd-looking, but experiments show that it generates + * fast code with essentially no branching overheads. We increment to + * the next RCR pointer and handle overflow and 'vbit'. */ + struct bm_rcr_entry *partial = rcr->cursor + 1; + rcr->cursor = RCR_CARRYCLEAR(partial); + if (partial != rcr->cursor) + rcr->vbit ^= BM_RCR_VERB_VBIT; +} + +static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode, + __maybe_unused enum bm_rcr_cmode cmode) +{ + /* This use of 'register', as well as all other occurances, is because + * it has been observed to generate much faster code with gcc than is + * otherwise the case. */ + register struct bm_rcr *rcr = &portal->rcr; + u32 cfg; + u8 pi; + + rcr->ring = portal->addr.addr_ce + CL_RCR; + rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1); + pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1); + rcr->cursor = rcr->ring + pi; + rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? 
BM_RCR_VERB_VBIT : 0; + rcr->available = BM_RCR_SIZE - 1 - cyc_diff(BM_RCR_SIZE, rcr->ci, pi); + rcr->ithresh = bm_in(RCR_ITR); +#ifdef CONFIG_FSL_DPA_CHECKING + rcr->busy = 0; + rcr->pmode = pmode; + rcr->cmode = cmode; +#endif + cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */ + bm_out(CFG, cfg); + return 0; +} + +static inline void bm_rcr_finish(struct bm_portal *portal) +{ + register struct bm_rcr *rcr = &portal->rcr; + u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1); + u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1); + DPA_ASSERT(!rcr->busy); + if (pi != RCR_PTR2IDX(rcr->cursor)) + pr_crit("losing uncommited RCR entries\n"); + if (ci != rcr->ci) + pr_crit("missing existing RCR completions\n"); + if (rcr->ci != RCR_PTR2IDX(rcr->cursor)) + pr_crit("RCR destroyed unquiesced\n"); +} + +static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal) +{ + register struct bm_rcr *rcr = &portal->rcr; + DPA_ASSERT(!rcr->busy); + if (!rcr->available) + return NULL; +#ifdef CONFIG_FSL_DPA_CHECKING + rcr->busy = 1; +#endif + dcbz_64(rcr->cursor); + return rcr->cursor; +} + +static inline void bm_rcr_abort(struct bm_portal *portal) +{ + __maybe_unused register struct bm_rcr *rcr = &portal->rcr; + DPA_ASSERT(rcr->busy); +#ifdef CONFIG_FSL_DPA_CHECKING + rcr->busy = 0; +#endif +} + +static inline struct bm_rcr_entry *bm_rcr_pend_and_next( + struct bm_portal *portal, u8 myverb) +{ + register struct bm_rcr *rcr = &portal->rcr; + DPA_ASSERT(rcr->busy); + DPA_ASSERT(rcr->pmode != bm_rcr_pvb); + if (rcr->available == 1) + return NULL; + rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit; + dcbf_64(rcr->cursor); + RCR_INC(rcr); + rcr->available--; + dcbz_64(rcr->cursor); + return rcr->cursor; +} + +static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb) +{ + register struct bm_rcr *rcr = &portal->rcr; + DPA_ASSERT(rcr->busy); + DPA_ASSERT(rcr->pmode == bm_rcr_pci); + rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit; + RCR_INC(rcr); + rcr->available--; + hwsync(); + bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor)); +#ifdef CONFIG_FSL_DPA_CHECKING + rcr->busy = 0; +#endif +} + +static inline void bm_rcr_pce_prefetch(struct bm_portal *portal) +{ + __maybe_unused register struct bm_rcr *rcr = &portal->rcr; + DPA_ASSERT(rcr->pmode == bm_rcr_pce); + bm_cl_invalidate(RCR_PI); + bm_cl_touch_rw(RCR_PI); +} + +static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb) +{ + register struct bm_rcr *rcr = &portal->rcr; + DPA_ASSERT(rcr->busy); + DPA_ASSERT(rcr->pmode == bm_rcr_pce); + rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit; + RCR_INC(rcr); + rcr->available--; + lwsync(); + bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor)); +#ifdef CONFIG_FSL_DPA_CHECKING + rcr->busy = 0; +#endif +} + +static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb) +{ + register struct bm_rcr *rcr = &portal->rcr; + struct bm_rcr_entry *rcursor; + DPA_ASSERT(rcr->busy); + DPA_ASSERT(rcr->pmode == bm_rcr_pvb); + lwsync(); + rcursor = rcr->cursor; + rcursor->__dont_write_directly__verb = myverb | rcr->vbit; + dcbf_64(rcursor); + RCR_INC(rcr); + rcr->available--; +#ifdef CONFIG_FSL_DPA_CHECKING + rcr->busy = 0; +#endif +} + +static inline u8 bm_rcr_cci_update(struct bm_portal *portal) +{ + register struct bm_rcr *rcr = &portal->rcr; + u8 diff, old_ci = rcr->ci; + DPA_ASSERT(rcr->cmode == bm_rcr_cci); + rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1); + diff = cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci); + rcr->available += diff; + 
return diff; +} + +static inline void bm_rcr_cce_prefetch(struct bm_portal *portal) +{ + __maybe_unused register struct bm_rcr *rcr = &portal->rcr; + DPA_ASSERT(rcr->cmode == bm_rcr_cce); + bm_cl_touch_ro(RCR_CI); +} + +static inline u8 bm_rcr_cce_update(struct bm_portal *portal) +{ + register struct bm_rcr *rcr = &portal->rcr; + u8 diff, old_ci = rcr->ci; + DPA_ASSERT(rcr->cmode == bm_rcr_cce); + rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1); + bm_cl_invalidate(RCR_CI); + diff = cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci); + rcr->available += diff; + return diff; +} + +static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal) +{ + register struct bm_rcr *rcr = &portal->rcr; + return rcr->ithresh; +} + +static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh) +{ + register struct bm_rcr *rcr = &portal->rcr; + rcr->ithresh = ithresh; + bm_out(RCR_ITR, ithresh); +} + +static inline u8 bm_rcr_get_avail(struct bm_portal *portal) +{ + register struct bm_rcr *rcr = &portal->rcr; + return rcr->available; +} + +static inline u8 bm_rcr_get_fill(struct bm_portal *portal) +{ + register struct bm_rcr *rcr = &portal->rcr; + return BM_RCR_SIZE - 1 - rcr->available; +} + + +/* ------------------------------ */ +/* --- Management command API --- */ + +static inline int bm_mc_init(struct bm_portal *portal) +{ + register struct bm_mc *mc = &portal->mc; + mc->cr = portal->addr.addr_ce + CL_CR; + mc->rr = portal->addr.addr_ce + CL_RR0; + mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) & + BM_MCC_VERB_VBIT) ? 0 : 1; + mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0; +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_idle; +#endif + return 0; +} + +static inline void bm_mc_finish(struct bm_portal *portal) +{ + __maybe_unused register struct bm_mc *mc = &portal->mc; + DPA_ASSERT(mc->state == mc_idle); +#ifdef CONFIG_FSL_DPA_CHECKING + if (mc->state != mc_idle) + pr_crit("Losing incomplete MC command\n"); +#endif +} + +static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal) +{ + register struct bm_mc *mc = &portal->mc; + DPA_ASSERT(mc->state == mc_idle); +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_user; +#endif + dcbz_64(mc->cr); + return mc->cr; +} + +static inline void bm_mc_abort(struct bm_portal *portal) +{ + __maybe_unused register struct bm_mc *mc = &portal->mc; + DPA_ASSERT(mc->state == mc_user); +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_idle; +#endif +} + +static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb) +{ + register struct bm_mc *mc = &portal->mc; + struct bm_mc_result *rr = mc->rr + mc->rridx; + DPA_ASSERT(mc->state == mc_user); + lwsync(); + mc->cr->__dont_write_directly__verb = myverb | mc->vbit; + dcbf(mc->cr); + dcbit_ro(rr); +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_hw; +#endif +} + +static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal) +{ + register struct bm_mc *mc = &portal->mc; + struct bm_mc_result *rr = mc->rr + mc->rridx; + DPA_ASSERT(mc->state == mc_hw); + /* The inactive response register's verb byte always returns zero until + * its command is submitted and completed. This includes the valid-bit, + * in case you were wondering... 
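+ * Callers therefore poll, eg.:
+ *   while (!(mcr = bm_mc_result(&p->p)))
+ *           cpu_relax();
+ * and the rridx/vbit toggle below then selects the other response register
+ * for the next command.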
*/ + if (!__raw_readb(&rr->verb)) { + dcbit_ro(rr); + return NULL; + } + mc->rridx ^= 1; + mc->vbit ^= BM_MCC_VERB_VBIT; +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_idle; +#endif + return rr; +} + + +/* ------------------------------------- */ +/* --- Portal interrupt register API --- */ + +static inline int bm_isr_init(__always_unused struct bm_portal *portal) +{ + return 0; +} + +static inline void bm_isr_finish(__always_unused struct bm_portal *portal) +{ +} + +#define SCN_REG(bpid) REG_SCN((bpid) / 32) +#define SCN_BIT(bpid) (0x80000000 >> (bpid & 31)) +static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid, + int enable) +{ + u32 val; + DPA_ASSERT(bpid < bman_pool_max); + /* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */ + val = __bm_in(&portal->addr, SCN_REG(bpid)); + if (enable) + val |= SCN_BIT(bpid); + else + val &= ~SCN_BIT(bpid); + __bm_out(&portal->addr, SCN_REG(bpid), val); +} + +static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n) +{ + return __bm_in(&portal->addr, REG_ISR + (n << 2)); +} + +static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n, + u32 val) +{ + __bm_out(&portal->addr, REG_ISR + (n << 2), val); +} diff --git a/drivers/staging/fsl_qbman/bman_private.h b/drivers/staging/fsl_qbman/bman_private.h new file mode 100644 index 0000000..c567314 --- /dev/null +++ b/drivers/staging/fsl_qbman/bman_private.h @@ -0,0 +1,144 @@ +/* Copyright 2008-2012 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "dpa_sys.h" +#include + +/* Revision info (for errata and feature handling) */ +#define BMAN_REV10 0x0100 +#define BMAN_REV20 0x0200 +#define BMAN_REV21 0x0201 +extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */ + +/* + * Global variables of the max portal/pool number this bman version supported + */ +extern u16 bman_pool_max; + +/* used by CCSR and portal interrupt code */ +enum bm_isr_reg { + bm_isr_status = 0, + bm_isr_enable = 1, + bm_isr_disable = 2, + bm_isr_inhibit = 3 +}; + +struct bm_portal_config { + /* Corenet portal addresses; + * [0]==cache-enabled, [1]==cache-inhibited. */ + __iomem void *addr_virt[2]; + struct resource addr_phys[2]; + /* Allow these to be joined in lists */ + struct list_head list; + /* User-visible portal configuration settings */ + struct bman_portal_config public_cfg; +}; + +#ifdef CONFIG_FSL_BMAN_CONFIG +/* Hooks from bman_driver.c to bman_config.c */ +int bman_init_ccsr(struct device_node *node); +#endif + +/* Hooks from bman_driver.c in to bman_high.c */ +struct bman_portal *bman_create_affine_portal( + const struct bm_portal_config *config); +struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect); +const struct bm_portal_config *bman_destroy_affine_portal(void); + +/* Pool logic in the portal driver, during initialisation, needs to know if + * there's access to CCSR or not (if not, it'll cripple the pool allocator). */ +#ifdef CONFIG_FSL_BMAN_CONFIG +int bman_have_ccsr(void); +#else +#define bman_have_ccsr() 0 +#endif + +/* Stockpile build constants. The _LOW value: when bman_acquire() is called and + * the stockpile fill-level is <= _LOW, an acquire is attempted from h/w but it + * might fail (if the buffer pool is depleted). So this value provides some + * "stagger" in that the bman_acquire() function will only fail if lots of bufs + * are requested at once or if h/w has been tested a couple of times without + * luck. The _HIGH value: when bman_release() is called and the stockpile + * fill-level is >= _HIGH, a release is attempted to h/w but it might fail (if + * the release ring is full). So this value provides some "stagger" so that + * ring-access is retried a couple of times prior to the API returning a + * failure. The following *must* be true; + * BMAN_STOCKPILE_HIGH-BMAN_STOCKPILE_LOW > 8 + * (to avoid thrashing) + * BMAN_STOCKPILE_SZ >= 16 + * (as the release logic expects to either send 8 buffers to hw prior to + * adding the given buffers to the stockpile or add the buffers to the + * stockpile before sending 8 to hw, as the API must be an all-or-nothing + * success/fail.) + */ +#define BMAN_STOCKPILE_SZ 16u /* number of bufs in per-pool cache */ +#define BMAN_STOCKPILE_LOW 2u /* when fill is <= this, acquire from hw */ +#define BMAN_STOCKPILE_HIGH 14u /* when fill is >= this, release to hw */ + +/*************************************************/ +/* BMan s/w corenet portal, low-level i/face */ +/*************************************************/ + +/* Used by all portal interrupt registers except 'inhibit'. NB, some of these + * definitions are exported for use by the bman_irqsource_***() APIs, so are + * commented-out here. */ +#if 0 +#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */ +#define BM_PIRQ_BSCN 0x00000001 /* Buffer depletion State Change */ +#endif +/* This mask contains all the "irqsource" bits visible to API users */ +#define BM_PIRQ_VISIBLE (BM_PIRQ_RCRI | BM_PIRQ_BSCN) + +/* These are bm__(). 
So for example, bm_disable_write() means "write + * the disable register" rather than "disable the ability to write". */ +#define bm_isr_status_read(bm) __bm_isr_read(bm, bm_isr_status) +#define bm_isr_status_clear(bm, m) __bm_isr_write(bm, bm_isr_status, m) +#define bm_isr_enable_read(bm) __bm_isr_read(bm, bm_isr_enable) +#define bm_isr_enable_write(bm, v) __bm_isr_write(bm, bm_isr_enable, v) +#define bm_isr_disable_read(bm) __bm_isr_read(bm, bm_isr_disable) +#define bm_isr_disable_write(bm, v) __bm_isr_write(bm, bm_isr_disable, v) +#define bm_isr_inhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 1) +#define bm_isr_uninhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 0) + +#ifdef CONFIG_FSL_BMAN_CONFIG +/* Set depletion thresholds associated with a buffer pool. Requires that the + * operating system have access to Bman CCSR (ie. compiled in support and + * run-time access courtesy of the device-tree). */ +int bm_pool_set(u32 bpid, const u32 *thresholds); +#define BM_POOL_THRESH_SW_ENTER 0 +#define BM_POOL_THRESH_SW_EXIT 1 +#define BM_POOL_THRESH_HW_ENTER 2 +#define BM_POOL_THRESH_HW_EXIT 3 + +/* Read the free buffer count for a given buffer */ +u32 bm_pool_free_buffers(u32 bpid); + +#endif /* CONFIG_FSL_BMAN_CONFIG */ diff --git a/drivers/staging/fsl_qbman/bman_test.c b/drivers/staging/fsl_qbman/bman_test.c new file mode 100644 index 0000000..db5b7fd --- /dev/null +++ b/drivers/staging/fsl_qbman/bman_test.c @@ -0,0 +1,56 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "bman_test.h" + +MODULE_AUTHOR("Geoff Thorpe"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Bman testing"); + +static int test_init(void) +{ +#ifdef CONFIG_FSL_BMAN_TEST_HIGH + int loop = 1; + while (loop--) + bman_test_high(); +#endif +#ifdef CONFIG_FSL_BMAN_TEST_THRESH + bman_test_thresh(); +#endif + return 0; +} + +static void test_exit(void) +{ +} + +module_init(test_init); +module_exit(test_exit); diff --git a/drivers/staging/fsl_qbman/bman_test.h b/drivers/staging/fsl_qbman/bman_test.h new file mode 100644 index 0000000..27c7c05 --- /dev/null +++ b/drivers/staging/fsl_qbman/bman_test.h @@ -0,0 +1,91 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +void bman_test_high(void); +void bman_test_thresh(void); + +static inline void __hexdump(unsigned long start, unsigned long end, + unsigned long p, size_t sz, unsigned char *c) +{ + while (start < end) { + unsigned int pos = 0; + char buf[64]; + int nl = 0; + pos += sprintf(buf + pos, "%08lx: ", start); + do { + if ((start < p) || (start >= (p + sz))) + pos += sprintf(buf + pos, ".."); + else + pos += sprintf(buf + pos, "%02x", *(c++)); + if (!(++start & 15)) { + buf[pos++] = '\n'; + nl = 1; + } else { + nl = 0; + if(!(start & 1)) + buf[pos++] = ' '; + if(!(start & 3)) + buf[pos++] = ' '; + } + } while (start & 15); + if (!nl) + buf[pos++] = '\n'; + buf[pos] = '\0'; + pr_info("%s", buf); + } +} +static inline void hexdump(void *ptr, size_t sz) +{ + unsigned long p = (unsigned long)ptr; + unsigned long start = p & ~(unsigned long)15; + unsigned long end = (p + sz + 15) & ~(unsigned long)15; + unsigned char *c = ptr; + __hexdump(start, end, p, sz, c); +} +static inline void hexdump_by_cl(void *ptr, size_t sz) +{ + unsigned long p = (unsigned long)ptr; + unsigned long start = p & ~(unsigned long)63; + unsigned long end = (p + sz + 63) & ~(unsigned long)63; + unsigned char *c = ptr; + __hexdump(start, end, p, sz, c); +} diff --git a/drivers/staging/fsl_qbman/bman_test_high.c b/drivers/staging/fsl_qbman/bman_test_high.c new file mode 100644 index 0000000..f2a5284 --- /dev/null +++ b/drivers/staging/fsl_qbman/bman_test_high.c @@ -0,0 +1,181 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "bman_test.h" +#include "bman_private.h" + +/*************/ +/* constants */ +/*************/ + +#define PORTAL_OPAQUE (void *)0xf00dbeef +#define POOL_OPAQUE (void *)0xdeadabba +#define NUM_BUFS 93 +#define LOOPS 3 +#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU + +/***************/ +/* global vars */ +/***************/ + +static struct bman_pool *pool; +static int depleted; +static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned; +static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned; +static int bufs_received; + +/* Predeclare the callback so we can instantiate pool parameters */ +static void depletion_cb(struct bman_portal *, struct bman_pool *, void *, int); + +/**********************/ +/* internal functions */ +/**********************/ + +static void bufs_init(void) +{ + int i; + for (i = 0; i < NUM_BUFS; i++) + bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i); + bufs_received = 0; +} + +static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b) +{ + if ((bman_ip_rev == BMAN_REV20) || (bman_ip_rev == BMAN_REV21)) { + + /* On SoCs with Bman revison 2.0, Bman only respects the 40 + * LS-bits of buffer addresses, masking off the upper 8-bits on + * release commands. The API provides for 48-bit addresses + * because some SoCs support all 48-bits. When generating + * garbage addresses for testing, we either need to zero the + * upper 8-bits when releasing to Bman (otherwise we'll be + * disappointed when the buffers we acquire back from Bman + * don't match), or we need to mask the upper 8-bits off when + * comparing. We do the latter. + */ + if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) + < (bm_buffer_get64(b) & BMAN_TOKEN_MASK)) + return -1; + if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) + > (bm_buffer_get64(b) & BMAN_TOKEN_MASK)) + return 1; + } else { + if (bm_buffer_get64(a) < bm_buffer_get64(b)) + return -1; + if (bm_buffer_get64(a) > bm_buffer_get64(b)) + return 1; + } + + return 0; +} + +static void bufs_confirm(void) +{ + int i, j; + for (i = 0; i < NUM_BUFS; i++) { + int matches = 0; + for (j = 0; j < NUM_BUFS; j++) + if (!bufs_cmp(&bufs_in[i], &bufs_out[j])) + matches++; + BUG_ON(matches != 1); + } +} + +/********/ +/* test */ +/********/ + +static void depletion_cb(struct bman_portal *__portal, struct bman_pool *__pool, + void *pool_ctx, int __depleted) +{ + BUG_ON(__pool != pool); + BUG_ON(pool_ctx != POOL_OPAQUE); + depleted = __depleted; +} + +void bman_test_high(void) +{ + struct bman_pool_params pparams = { + .flags = BMAN_POOL_FLAG_DEPLETION | BMAN_POOL_FLAG_DYNAMIC_BPID, + .cb = depletion_cb, + .cb_ctx = POOL_OPAQUE, + }; + int i, loops = LOOPS; + + bufs_init(); + + pr_info("BMAN: --- starting high-level test ---\n"); + + pool = bman_new_pool(&pparams); + BUG_ON(!pool); + + /*******************/ + /* Release buffers */ + /*******************/ +do_loop: + i = 0; + while (i < NUM_BUFS) { + u32 flags = BMAN_RELEASE_FLAG_WAIT; + int num = 8; + if ((i + num) > NUM_BUFS) + num = NUM_BUFS - i; + if ((i + num) == NUM_BUFS) + flags |= BMAN_RELEASE_FLAG_WAIT_SYNC; + if (bman_release(pool, bufs_in + i, num, flags)) + panic("bman_release() failed\n"); + i += num; + } + + /*******************/ + /* Acquire buffers */ + /*******************/ + while (i > 0) { + int tmp, num = 8; + if (num > i) + num = i; + tmp = bman_acquire(pool, bufs_out + i - num, num, 0); + BUG_ON(tmp != num); + i -= num; + } + i = bman_acquire(pool, NULL, 1, 0); + BUG_ON(i > 0); + + bufs_confirm(); + + if (--loops) + goto do_loop; + + /************/ + /* Clean 
up */ + /************/ + bman_free_pool(pool); + pr_info("BMAN: --- finished high-level test ---\n"); +} diff --git a/drivers/staging/fsl_qbman/bman_test_thresh.c b/drivers/staging/fsl_qbman/bman_test_thresh.c new file mode 100644 index 0000000..b11862f --- /dev/null +++ b/drivers/staging/fsl_qbman/bman_test_thresh.c @@ -0,0 +1,196 @@ +/* Copyright 2010-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "bman_test.h" + +/* Test constants */ +#define TEST_NUMBUFS 129728 +#define TEST_EXIT 129536 +#define TEST_ENTRY 129024 + +struct affine_test_data { + struct task_struct *t; + int cpu; + int expect_affinity; + int drain; + int num_enter; + int num_exit; + struct list_head node; + struct completion wakethread; + struct completion wakeparent; +}; + +static void cb_depletion(struct bman_portal *portal, + struct bman_pool *pool, + void *opaque, + int depleted) +{ + struct affine_test_data *data = opaque; + int c = smp_processor_id(); + pr_info("cb_depletion: bpid=%d, depleted=%d, cpu=%d, original=%d\n", + bman_get_params(pool)->bpid, depleted, c, data->cpu); + /* We should be executing on the CPU of the thread that owns the pool if + * and that CPU has an affine portal (ie. it isn't slaved). 
*/ + BUG_ON((c != data->cpu) && data->expect_affinity); + BUG_ON((c == data->cpu) && !data->expect_affinity); + if (depleted) + data->num_enter++; + else + data->num_exit++; +} + +/* Params used to set up a pool, this also dynamically allocates a BPID */ +struct bman_pool_params params_nocb = { + .flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_THRESH, + .thresholds = { TEST_ENTRY, TEST_EXIT, 0, 0 } +}; + +/* Params used to set up each cpu's pool with callbacks enabled */ +struct bman_pool_params params_cb = { + .bpid = 0, /* will be replaced to match pool_nocb */ + .flags = BMAN_POOL_FLAG_DEPLETION, + .cb = cb_depletion +}; + +static struct bman_pool *pool_nocb; +static LIST_HEAD(threads); + +static int affine_test(void *__data) +{ + struct bman_pool *pool; + struct affine_test_data *data = __data; + struct bman_pool_params my_params = params_cb; + + pr_info("thread %d: starting\n", data->cpu); + /* create the pool */ + my_params.cb_ctx = data; + pool = bman_new_pool(&my_params); + BUG_ON(!pool); + complete(&data->wakeparent); + wait_for_completion(&data->wakethread); + init_completion(&data->wakethread); + + /* if we're the drainer, we get signalled for that */ + if (data->drain) { + struct bm_buffer buf; + int ret; + pr_info("thread %d: draining...\n", data->cpu); + do { + ret = bman_acquire(pool, &buf, 1, 0); + } while (ret > 0); + pr_info("thread %d: draining done.\n", data->cpu); + complete(&data->wakeparent); + wait_for_completion(&data->wakethread); + init_completion(&data->wakethread); + } + + /* cleanup */ + bman_free_pool(pool); + while (!kthread_should_stop()) + cpu_relax(); + pr_info("thread %d: exiting\n", data->cpu); + return 0; +} + +static struct affine_test_data *start_affine_test(int cpu, int drain) +{ + struct affine_test_data *data = kmalloc(sizeof(*data), GFP_KERNEL); + + if (!data) + return NULL; + data->cpu = cpu; + data->expect_affinity = cpumask_test_cpu(cpu, bman_affine_cpus()); + data->drain = drain; + data->num_enter = 0; + data->num_exit = 0; + init_completion(&data->wakethread); + init_completion(&data->wakeparent); + list_add_tail(&data->node, &threads); + data->t = kthread_create(affine_test, data, "threshtest%d", cpu); + BUG_ON(IS_ERR(data->t)); + kthread_bind(data->t, cpu); + wake_up_process(data->t); + return data; +} + +void bman_test_thresh(void) +{ + int loop = TEST_NUMBUFS; + int ret, num_cpus = 0; + struct affine_test_data *data, *drainer = NULL; + + pr_info("bman_test_thresh: start\n"); + + /* allocate a BPID and seed it */ + pool_nocb = bman_new_pool(¶ms_nocb); + BUG_ON(!pool_nocb); + while (loop--) { + struct bm_buffer buf; + bm_buffer_set64(&buf, 0x0badbeef + loop); + ret = bman_release(pool_nocb, &buf, 1, + BMAN_RELEASE_FLAG_WAIT); + BUG_ON(ret); + } + while (!bman_rcr_is_empty()) + cpu_relax(); + pr_info("bman_test_thresh: buffers are in\n"); + + /* create threads and wait for them to create pools */ + params_cb.bpid = bman_get_params(pool_nocb)->bpid; + for_each_cpu(loop, cpu_online_mask) { + data = start_affine_test(loop, drainer ? 
0 : 1); + BUG_ON(!data); + if (!drainer) + drainer = data; + num_cpus++; + wait_for_completion(&data->wakeparent); + } + + /* signal the drainer to start draining */ + complete(&drainer->wakethread); + wait_for_completion(&drainer->wakeparent); + init_completion(&drainer->wakeparent); + + /* tear down */ + list_for_each_entry_safe(data, drainer, &threads, node) { + complete(&data->wakethread); + ret = kthread_stop(data->t); + BUG_ON(ret); + list_del(&data->node); + /* check that we get the expected callbacks (and no others) */ + BUG_ON(data->num_enter != 1); + BUG_ON(data->num_exit != 0); + kfree(data); + } + bman_free_pool(pool_nocb); + + pr_info("bman_test_thresh: done\n"); +} diff --git a/drivers/staging/fsl_qbman/dpa_alloc.c b/drivers/staging/fsl_qbman/dpa_alloc.c new file mode 100644 index 0000000..03a9d28 --- /dev/null +++ b/drivers/staging/fsl_qbman/dpa_alloc.c @@ -0,0 +1,503 @@ +/* Copyright 2009-2012 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "dpa_sys.h" +#include +#include + +/* Qman and Bman APIs are front-ends to the common code; */ + +static DECLARE_DPA_ALLOC(bpalloc); /* BPID allocator */ +static DECLARE_DPA_ALLOC(fqalloc); /* FQID allocator */ +static DECLARE_DPA_ALLOC(qpalloc); /* pool-channel allocator */ +static DECLARE_DPA_ALLOC(cgralloc); /* CGR ID allocator */ +static DECLARE_DPA_ALLOC(ceetm0_challoc); /* CEETM Channel ID allocator */ +static DECLARE_DPA_ALLOC(ceetm0_lfqidalloc); /* CEETM LFQID allocator */ +static DECLARE_DPA_ALLOC(ceetm1_challoc); /* CEETM Channel ID allocator */ +static DECLARE_DPA_ALLOC(ceetm1_lfqidalloc); /* CEETM LFQID allocator */ + +/* This is a sort-of-conditional dpa_alloc_free() routine. Eg. 
when releasing
+ * FQIDs (probably from user-space), it can filter out those that aren't in
+ * the OOS state (better to leak a h/w resource than to crash). This function
+ * returns the number of invalid IDs that were not released. */
+static u32 release_id_range(struct dpa_alloc *alloc, u32 id, u32 count,
+			int (*is_valid)(u32 id))
+{
+	int valid_mode = 0;
+	u32 loop = id, total_invalid = 0;
+	while (loop < (id + count)) {
+		int isvalid = is_valid ? is_valid(loop) : 1;
+		if (!valid_mode) {
+			/* We're looking for a valid ID to terminate an invalid
+			 * range */
+			if (isvalid) {
+				/* We finished a range of invalid IDs, a valid
+				 * range is now underway */
+				valid_mode = 1;
+				count -= (loop - id);
+				id = loop;
+			} else
+				total_invalid++;
+		} else {
+			/* We're looking for an invalid ID to terminate a
+			 * valid range */
+			if (!isvalid) {
+				/* Release the range of valid IDs, an invalid
+				 * range is now underway */
+				if (loop > id)
+					dpa_alloc_free(alloc, id, loop - id);
+				valid_mode = 0;
+			}
+		}
+		loop++;
+	}
+	/* Release any unterminated range of valid IDs */
+	if (valid_mode && count)
+		dpa_alloc_free(alloc, id, count);
+	return total_invalid;
+}
+
+/* BPID allocator front-end */
+
+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
+{
+	return dpa_alloc_new(&bpalloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(bman_alloc_bpid_range);
+
+static int bp_valid(u32 bpid)
+{
+	struct bm_pool_state state;
+	int ret = bman_query_pools(&state);
+	BUG_ON(ret);
+	if (bman_depletion_get(&state.as.state, bpid))
+		/* "Available==1" means unavailable, go figure. I.e. it has no
+		 * buffers, which means it is valid for deallocation. (So true
+		 * means false, which means true...) */
+		return 1;
+	return 0;
+}
+void bman_release_bpid_range(u32 bpid, u32 count)
+{
+	u32 total_invalid = release_id_range(&bpalloc, bpid, count, bp_valid);
+	if (total_invalid)
+		pr_err("BPID range [%d..%d] (%d) had %d leaks\n",
+			bpid, bpid + count - 1, count, total_invalid);
+}
+EXPORT_SYMBOL(bman_release_bpid_range);
+
+/* FQID allocator front-end */
+
+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
+{
+	return dpa_alloc_new(&fqalloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(qman_alloc_fqid_range);
+
+static int fq_valid(u32 fqid)
+{
+	struct qman_fq fq = {
+		.fqid = fqid
+	};
+	struct qm_mcr_queryfq_np np;
+	int err = qman_query_fq_np(&fq, &np);
+	BUG_ON(err);
+	return ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS);
+}
+void qman_release_fqid_range(u32 fqid, u32 count)
+{
+	u32 total_invalid = release_id_range(&fqalloc, fqid, count, fq_valid);
+	if (total_invalid)
+		pr_err("FQID range [%d..%d] (%d) had %d leaks\n",
+			fqid, fqid + count - 1, count, total_invalid);
+}
+EXPORT_SYMBOL(qman_release_fqid_range);
+
+/* Pool-channel allocator front-end */
+
+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial)
+{
+	return dpa_alloc_new(&qpalloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(qman_alloc_pool_range);
+
+static int qp_valid(u32 qp)
+{
+	/* TBD: when resource-management improves, we may be able to find
+	 * something better than this. Currently we query all FQDs starting
+	 * from FQID 1 until we get an "invalid FQID" error, looking for
+	 * non-OOS FQDs whose destination channel is the pool-channel being
+	 * released.
*/ + struct qman_fq fq = { + .fqid = 1 + }; + int err; + do { + struct qm_mcr_queryfq_np np; + err = qman_query_fq_np(&fq, &np); + if (err) + /* FQID range exceeded, found no problems */ + return 1; + if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { + struct qm_fqd fqd; + err = qman_query_fq(&fq, &fqd); + BUG_ON(err); + if (fqd.dest.channel == qp) + /* The channel is the FQ's target, can't free */ + return 0; + } + /* Move to the next FQID */ + fq.fqid++; + } while (1); +} +void qman_release_pool_range(u32 qp, u32 count) +{ + u32 total_invalid = release_id_range(&qpalloc, qp, count, qp_valid); + if (total_invalid) { + /* Pool channels are almost always used individually */ + if (count == 1) + pr_err("Pool channel 0x%x had %d leaks\n", + qp, total_invalid); + else + pr_err("Pool channels [%d..%d] (%d) had %d leaks\n", + qp, qp + count - 1, count, total_invalid); + } +} +EXPORT_SYMBOL(qman_release_pool_range); + +/* CGR ID allocator front-end */ + +int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial) +{ + return dpa_alloc_new(&cgralloc, result, count, align, partial); +} +EXPORT_SYMBOL(qman_alloc_cgrid_range); + +void qman_release_cgrid_range(u32 cgrid, u32 count) +{ + u32 total_invalid = release_id_range(&cgralloc, cgrid, count, NULL); + if (total_invalid) + pr_err("CGRID range [%d..%d] (%d) had %d leaks\n", + cgrid, cgrid + count - 1, count, total_invalid); +} +EXPORT_SYMBOL(qman_release_cgrid_range); + +/* CEETM CHANNEL ID allocator front-end */ +int qman_alloc_ceetm0_channel_range(u32 *result, u32 count, u32 align, + int partial) +{ + return dpa_alloc_new(&ceetm0_challoc, result, count, align, partial); +} +EXPORT_SYMBOL(qman_alloc_ceetm0_channel_range); + +int qman_alloc_ceetm1_channel_range(u32 *result, u32 count, u32 align, + int partial) +{ + return dpa_alloc_new(&ceetm1_challoc, result, count, align, partial); +} +EXPORT_SYMBOL(qman_alloc_ceetm1_channel_range); + +void qman_release_ceetm0_channel_range(u32 channelid, u32 count) +{ + u32 total_invalid; + + total_invalid = release_id_range(&ceetm0_challoc, channelid, count, + NULL); + if (total_invalid) + pr_err("CEETM channel range [%d..%d] (%d) had %d leaks\n", + channelid, channelid + count - 1, count, total_invalid); +} +EXPORT_SYMBOL(qman_release_ceetm0_channel_range); + +void qman_release_ceetm1_channel_range(u32 channelid, u32 count) +{ + u32 total_invalid; + total_invalid = release_id_range(&ceetm1_challoc, channelid, count, + NULL); + if (total_invalid) + pr_err("CEETM channel range [%d..%d] (%d) had %d leaks\n", + channelid, channelid + count - 1, count, total_invalid); +} +EXPORT_SYMBOL(qman_release_ceetm1_channel_range); + +/* CEETM LFQID allocator front-end */ +int qman_alloc_ceetm0_lfqid_range(u32 *result, u32 count, u32 align, + int partial) +{ + return dpa_alloc_new(&ceetm0_lfqidalloc, result, count, align, partial); +} +EXPORT_SYMBOL(qman_alloc_ceetm0_lfqid_range); + +int qman_alloc_ceetm1_lfqid_range(u32 *result, u32 count, u32 align, + int partial) +{ + return dpa_alloc_new(&ceetm1_lfqidalloc, result, count, align, partial); +} +EXPORT_SYMBOL(qman_alloc_ceetm1_lfqid_range); + +void qman_release_ceetm0_lfqid_range(u32 lfqid, u32 count) +{ + u32 total_invalid; + + total_invalid = release_id_range(&ceetm0_lfqidalloc, lfqid, count, + NULL); + if (total_invalid) + pr_err("CEETM LFQID range [0x%x..0x%x] (%d) had %d leaks\n", + lfqid, lfqid + count - 1, count, total_invalid); +} +EXPORT_SYMBOL(qman_release_ceetm0_lfqid_range); + +void qman_release_ceetm1_lfqid_range(u32 lfqid, u32 
count) +{ + u32 total_invalid; + + total_invalid = release_id_range(&ceetm1_lfqidalloc, lfqid, count, + NULL); + if (total_invalid) + pr_err("CEETM LFQID range [0x%x..0x%x] (%d) had %d leaks\n", + lfqid, lfqid + count - 1, count, total_invalid); +} +EXPORT_SYMBOL(qman_release_ceetm1_lfqid_range); + +/* Everything else is the common backend to all the allocators */ + +/* The allocator is a (possibly-empty) list of these; */ +struct alloc_node { + struct list_head list; + u32 base; + u32 num; +}; + +/* #define DPA_ALLOC_DEBUG */ + +#ifdef DPA_ALLOC_DEBUG +#define DPRINT pr_info +static void DUMP(struct dpa_alloc *alloc) +{ + int off = 0; + char buf[256]; + struct alloc_node *p; + list_for_each_entry(p, &alloc->list, list) { + if (off < 255) + off += snprintf(buf + off, 255-off, "{%d,%d}", + p->base, p->base + p->num - 1); + } + pr_info("%s\n", buf); +} +#else +#define DPRINT(x...) do { ; } while (0) +#define DUMP(a) do { ; } while (0) +#endif + +int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align, + int partial) +{ + struct alloc_node *i = NULL, *next_best = NULL; + u32 base, next_best_base = 0, num = 0, next_best_num = 0; + struct alloc_node *margin_left, *margin_right; + + *result = (u32)-1; + DPRINT("alloc_range(%d,%d,%d)\n", count, align, partial); + DUMP(alloc); + /* If 'align' is 0, it should behave as though it was 1 */ + if (!align) + align = 1; + margin_left = kmalloc(sizeof(*margin_left), GFP_KERNEL); + if (!margin_left) + goto err; + margin_right = kmalloc(sizeof(*margin_right), GFP_KERNEL); + if (!margin_right) { + kfree(margin_left); + goto err; + } + spin_lock_irq(&alloc->lock); + list_for_each_entry(i, &alloc->list, list) { + base = (i->base + align - 1) / align; + base *= align; + if ((base - i->base) >= i->num) + /* alignment is impossible, regardless of count */ + continue; + num = i->num - (base - i->base); + if (num >= count) { + /* this one will do nicely */ + num = count; + goto done; + } + if (num > next_best_num) { + next_best = i; + next_best_base = base; + next_best_num = num; + } + } + if (partial && next_best) { + i = next_best; + base = next_best_base; + num = next_best_num; + } else + i = NULL; +done: + if (i) { + if (base != i->base) { + margin_left->base = i->base; + margin_left->num = base - i->base; + list_add_tail(&margin_left->list, &i->list); + } else + kfree(margin_left); + if ((base + num) < (i->base + i->num)) { + margin_right->base = base + num; + margin_right->num = (i->base + i->num) - + (base + num); + list_add(&margin_right->list, &i->list); + } else + kfree(margin_right); + list_del(&i->list); + kfree(i); + *result = base; + } + spin_unlock_irq(&alloc->lock); +err: + DPRINT("returning %d\n", i ? num : -ENOMEM); + DUMP(alloc); + return i ? (int)num : -ENOMEM; +} + +/* Allocate the list node using GFP_ATOMIC, because we *really* want to avoid + * forcing error-handling on to users in the deallocation path. 
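+ *
+ * For illustration only (not part of the driver), a caller typically pairs
+ * the two entry points like this, assuming a hypothetical "fooalloc"
+ * declared with DECLARE_DPA_ALLOC() and already seeded by an earlier
+ * dpa_alloc_free() of the usable ID range:
+ *
+ *	u32 id;
+ *	int ret = dpa_alloc_new(&fooalloc, &id, 8, 4, 0);
+ *	if (ret < 0)
+ *		return ret;		// allocation may fail with -ENOMEM
+ *	// ... use IDs [id..id+7], with 'id' a multiple of 4 ...
+ *	dpa_alloc_free(&fooalloc, id, 8);	// release never reports failure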
 */
+void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
+{
+	struct alloc_node *i, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
+	BUG_ON(!node);
+	DPRINT("release_range(%d,%d)\n", base_id, count);
+	DUMP(alloc);
+	BUG_ON(!count);
+	spin_lock_irq(&alloc->lock);
+	node->base = base_id;
+	node->num = count;
+	list_for_each_entry(i, &alloc->list, list) {
+		if (i->base >= node->base) {
+			/* BUG_ON(any overlapping) */
+			BUG_ON(i->base < (node->base + node->num));
+			list_add_tail(&node->list, &i->list);
+			goto done;
+		}
+	}
+	list_add_tail(&node->list, &alloc->list);
+done:
+	/* Merge to the left */
+	i = list_entry(node->list.prev, struct alloc_node, list);
+	if (node->list.prev != &alloc->list) {
+		BUG_ON((i->base + i->num) > node->base);
+		if ((i->base + i->num) == node->base) {
+			node->base = i->base;
+			node->num += i->num;
+			list_del(&i->list);
+			kfree(i);
+		}
+	}
+	/* Merge to the right */
+	i = list_entry(node->list.next, struct alloc_node, list);
+	if (node->list.next != &alloc->list) {
+		BUG_ON((node->base + node->num) > i->base);
+		if ((node->base + node->num) == i->base) {
+			node->num += i->num;
+			list_del(&i->list);
+			kfree(i);
+		}
+	}
+	spin_unlock_irq(&alloc->lock);
+	DUMP(alloc);
+}
+
+int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base, u32 num)
+{
+	struct alloc_node *i = NULL;
+	struct alloc_node *margin_left, *margin_right;
+
+	DPRINT("alloc_reserve(%d,%d)\n", base, num);
+	DUMP(alloc);
+	margin_left = kmalloc(sizeof(*margin_left), GFP_KERNEL);
+	if (!margin_left)
+		goto err;
+	margin_right = kmalloc(sizeof(*margin_right), GFP_KERNEL);
+	if (!margin_right) {
+		kfree(margin_left);
+		goto err;
+	}
+	spin_lock_irq(&alloc->lock);
+	list_for_each_entry(i, &alloc->list, list)
+		if ((i->base <= base) && ((i->base + i->num) >= (base + num)))
+			/* yep, the reservation is within this node */
+			goto done;
+	i = NULL;
+done:
+	if (i) {
+		if (base != i->base) {
+			margin_left->base = i->base;
+			margin_left->num = base - i->base;
+			list_add_tail(&margin_left->list, &i->list);
+		} else
+			kfree(margin_left);
+		if ((base + num) < (i->base + i->num)) {
+			margin_right->base = base + num;
+			margin_right->num = (i->base + i->num) -
+						(base + num);
+			list_add(&margin_right->list, &i->list);
+		} else
+			kfree(margin_right);
+		list_del(&i->list);
+		kfree(i);
+	}
+	spin_unlock_irq(&alloc->lock);
+err:
+	DPRINT("returning %d\n", i ? 0 : -ENOMEM);
+	DUMP(alloc);
+	return i ? 0 : -ENOMEM;
+}
+
+int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count)
+{
+	struct alloc_node *i = NULL;
+	DPRINT("alloc_pop()\n");
+	DUMP(alloc);
+	spin_lock_irq(&alloc->lock);
+	if (!list_empty(&alloc->list)) {
+		i = list_entry(alloc->list.next, struct alloc_node, list);
+		list_del(&i->list);
+	}
+	spin_unlock_irq(&alloc->lock);
+	DPRINT("returning %d\n", i ? 0 : -ENOMEM);
+	DUMP(alloc);
+	if (!i)
+		return -ENOMEM;
+	*result = i->base;
+	*count = i->num;
+	kfree(i);
+	return 0;
+}
diff --git a/drivers/staging/fsl_qbman/dpa_sys.h b/drivers/staging/fsl_qbman/dpa_sys.h
new file mode 100644
index 0000000..505fbf4
--- /dev/null
+++ b/drivers/staging/fsl_qbman/dpa_sys.h
@@ -0,0 +1,328 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DPA_SYS_H +#define DPA_SYS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define __devinit +#define __devexit_p + +/* When copying aligned words or shorts, try to avoid memcpy() */ +#define CONFIG_TRY_BETTER_MEMCPY + +/* For 2-element tables related to cache-inhibited and cache-enabled mappings */ +#define DPA_PORTAL_CE 0 +#define DPA_PORTAL_CI 1 + +/* These stubs are re-mapped to hypervisor+failover features in kernel trees + * that contain that support. */ +static inline int pamu_enable_liodn(struct device_node *n, int i) +{ + return 0; +} +/***********************/ +/* Misc inline assists */ +/***********************/ + +/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler + * barriers and that dcb*() won't fall victim to compiler or execution + * reordering with respect to other code/instructions that manipulate the same + * cacheline. 
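+ *
+ * For example, the MCR command sequence in qman_config.c later in this
+ * patch depends on exactly that ordering guarantee (illustrative sketch,
+ * not a literal quote):
+ *
+ *	qm_out(MCP(0), pfdr_start);	// write the command parameters first
+ *	lwsync();			// params visible before the verb
+ *	qm_out(MCR, MCR_INIT_PFDR);	// h/w may act once it sees the verb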
*/ +#define hwsync() \ + do { \ + __asm__ __volatile__ ("sync" : : : "memory"); \ + } while(0) +#define lwsync() \ + do { \ + __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory"); \ + } while(0) +#define dcbf(p) \ + do { \ + __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory"); \ + } while(0) +#define dcbt_ro(p) \ + do { \ + __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p)); \ + } while(0) +#define dcbt_rw(p) \ + do { \ + __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p)); \ + } while(0) +#define dcbi(p) dcbf(p) +#ifdef CONFIG_PPC_E500MC +#define dcbzl(p) \ + do { \ + __asm__ __volatile__ ("dcbzl 0,%0" : : "r" (p)); \ + } while (0) +#define dcbz_64(p) \ + do { \ + dcbzl(p); \ + } while (0) +#define dcbf_64(p) \ + do { \ + dcbf(p); \ + } while (0) +/* Commonly used combo */ +#define dcbit_ro(p) \ + do { \ + dcbi(p); \ + dcbt_ro(p); \ + } while (0) +#else +#define dcbz(p) \ + do { \ + __asm__ __volatile__ ("dcbz 0,%0" : : "r" (p)); \ + } while (0) +#define dcbz_64(p) \ + do { \ + dcbz((u32)p + 32); \ + dcbz(p); \ + } while (0) +#define dcbf_64(p) \ + do { \ + dcbf((u32)p + 32); \ + dcbf(p); \ + } while (0) +/* Commonly used combo */ +#define dcbit_ro(p) \ + do { \ + dcbi(p); \ + dcbi((u32)p + 32); \ + dcbt_ro(p); \ + dcbt_ro((u32)p + 32); \ + } while (0) +#endif /* CONFIG_PPC_E500MC */ + +static inline u64 mfatb(void) +{ + u32 hi, lo, chk; + do { + hi = mfspr(SPRN_ATBU); + lo = mfspr(SPRN_ATBL); + chk = mfspr(SPRN_ATBU); + } while (unlikely(hi != chk)); + return ((u64)hi << 32) | (u64)lo; +} + +#ifdef CONFIG_FSL_DPA_CHECKING +#define DPA_ASSERT(x) \ + do { \ + if (!(x)) { \ + pr_crit("ASSERT: (%s:%d) %s\n", __FILE__, __LINE__, \ + __stringify_1(x)); \ + dump_stack(); \ + panic("assertion failure"); \ + } \ + } while(0) +#else +#define DPA_ASSERT(x) +#endif + +/* memcpy() stuff - when you know alignments in advance */ +#ifdef CONFIG_TRY_BETTER_MEMCPY +static inline void copy_words(void *dest, const void *src, size_t sz) +{ + u32 *__dest = dest; + const u32 *__src = src; + size_t __sz = sz >> 2; + BUG_ON((unsigned long)dest & 0x3); + BUG_ON((unsigned long)src & 0x3); + BUG_ON(sz & 0x3); + while (__sz--) + *(__dest++) = *(__src++); +} +static inline void copy_shorts(void *dest, const void *src, size_t sz) +{ + u16 *__dest = dest; + const u16 *__src = src; + size_t __sz = sz >> 1; + BUG_ON((unsigned long)dest & 0x1); + BUG_ON((unsigned long)src & 0x1); + BUG_ON(sz & 0x1); + while (__sz--) + *(__dest++) = *(__src++); +} +static inline void copy_bytes(void *dest, const void *src, size_t sz) +{ + u8 *__dest = dest; + const u8 *__src = src; + while (sz--) + *(__dest++) = *(__src++); +} +#else +#define copy_words memcpy +#define copy_shorts memcpy +#define copy_bytes memcpy +#endif + +/************/ +/* RB-trees */ +/************/ + +/* We encapsulate RB-trees so that its easier to use non-linux forms in + * non-linux systems. This also encapsulates the extra plumbing that linux code + * usually provides when using RB-trees. This encapsulation assumes that the + * data type held by the tree is u32. 
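+ *
+ * For illustration (hypothetical field names, not a quote from this patch),
+ * an instantiation such as:
+ *
+ *	IMPLEMENT_DPA_RBTREE(fqtree, struct qman_fq, node, fqid);
+ *
+ * generates inline fqtree_push()/fqtree_del()/fqtree_find() helpers that key
+ * the tree on the u32 'fqid' member and link entries through the 'node'
+ * rb_node member.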
*/ + +struct dpa_rbtree { + struct rb_root root; +}; +#define DPA_RBTREE { .root = RB_ROOT } + +static inline void dpa_rbtree_init(struct dpa_rbtree *tree) +{ + tree->root = RB_ROOT; +} + +#define IMPLEMENT_DPA_RBTREE(name, type, node_field, val_field) \ +static inline int name##_push(struct dpa_rbtree *tree, type *obj) \ +{ \ + struct rb_node *parent = NULL, **p = &tree->root.rb_node; \ + while (*p) { \ + u32 item; \ + parent = *p; \ + item = rb_entry(parent, type, node_field)->val_field; \ + if (obj->val_field < item) \ + p = &parent->rb_left; \ + else if (obj->val_field > item) \ + p = &parent->rb_right; \ + else \ + return -EBUSY; \ + } \ + rb_link_node(&obj->node_field, parent, p); \ + rb_insert_color(&obj->node_field, &tree->root); \ + return 0; \ +} \ +static inline void name##_del(struct dpa_rbtree *tree, type *obj) \ +{ \ + rb_erase(&obj->node_field, &tree->root); \ +} \ +static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \ +{ \ + type *ret; \ + struct rb_node *p = tree->root.rb_node; \ + while (p) { \ + ret = rb_entry(p, type, node_field); \ + if (val < ret->val_field) \ + p = p->rb_left; \ + else if (val > ret->val_field) \ + p = p->rb_right; \ + else \ + return ret; \ + } \ + return NULL; \ +} + +/************/ +/* Bootargs */ +/************/ + +/* Qman has "qportals=" and Bman has "bportals=", they use the same syntax + * though; a comma-separated list of items, each item being a cpu index and/or a + * range of cpu indices, and each item optionally be prefixed by "s" to indicate + * that the portal associated with that cpu should be shared. See bman_driver.c + * for more specifics. */ +static int __parse_portals_cpu(const char **s, int *cpu) +{ + *cpu = 0; + if (!isdigit(**s)) + return -EINVAL; + while (isdigit(**s)) + *cpu = *cpu * 10 + (*((*s)++) - '0'); + return 0; +} +static inline int parse_portals_bootarg(char *str, struct cpumask *want_shared, + struct cpumask *want_unshared, + const char *argname) +{ + const char *s = str; + unsigned int shared, cpu1, cpu2, loop; + +keep_going: + if (*s == 's') { + shared = 1; + s++; + } else + shared = 0; + if (__parse_portals_cpu(&s, &cpu1)) + goto err; + if (*s == '-') { + s++; + if (__parse_portals_cpu(&s, &cpu2)) + goto err; + if (cpu2 < cpu1) + goto err; + } else + cpu2 = cpu1; + for (loop = cpu1; loop <= cpu2; loop++) + cpumask_set_cpu(loop, shared ? want_shared : want_unshared); + if (*s == ',') { + s++; + goto keep_going; + } else if ((*s == '\0') || isspace(*s)) + return 0; +err: + pr_crit("Malformed %s argument: %s, offset: %lu\n", argname, str, + (unsigned long)s - (unsigned long)str); + return -EINVAL; +} + +#endif /* DPA_SYS_H */ diff --git a/drivers/staging/fsl_qbman/qman_config.c b/drivers/staging/fsl_qbman/qman_config.c new file mode 100644 index 0000000..c924ab9 --- /dev/null +++ b/drivers/staging/fsl_qbman/qman_config.c @@ -0,0 +1,1221 @@ +/* Copyright 2008-2012 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CONFIG_SMP +#include /* get_hard_smp_processor_id() */ +#endif + +#include +#include "qman_private.h" + +/* Last updated for v00.800 of the BG */ + +/* Register offsets */ +#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10)) +#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10)) +#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10)) +#define REG_DD_CFG 0x0200 +#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10)) +#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10)) +#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10)) +#define REG_PFDR_FPC 0x0400 +#define REG_PFDR_FP_HEAD 0x0404 +#define REG_PFDR_FP_TAIL 0x0408 +#define REG_PFDR_FP_LWIT 0x0410 +#define REG_PFDR_CFG 0x0414 +#define REG_SFDR_CFG 0x0500 +#define REG_SFDR_IN_USE 0x0504 +#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04)) +#define REG_WQ_DEF_ENC_WQID 0x0630 +#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04)) +#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04)) +#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04)) +#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04)) +#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */ +#define REG_CM_CFG 0x0800 +#define REG_ECSR 0x0a00 +#define REG_ECIR 0x0a04 +#define REG_EADR 0x0a08 +#define REG_ECIR2 0x0a0c +#define REG_EDATA(n) (0x0a10 + ((n) * 0x04)) +#define REG_SBEC(n) (0x0a80 + ((n) * 0x04)) +#define REG_MCR 0x0b00 +#define REG_MCP(n) (0x0b04 + ((n) * 0x04)) +#define REG_MISC_CFG 0x0be0 +#define REG_HID_CFG 0x0bf0 +#define REG_IDLE_STAT 0x0bf4 +#define REG_IP_REV_1 0x0bf8 +#define REG_IP_REV_2 0x0bfc +#define REG_FQD_BARE 0x0c00 +#define REG_PFDR_BARE 0x0c20 +#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */ +#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */ +#define REG_QCSP_BARE 0x0c80 +#define REG_QCSP_BAR 0x0c84 +#define REG_CI_SCHED_CFG 0x0d00 +#define REG_SRCIDR 0x0d04 +#define REG_LIODNR 0x0d08 +#define REG_CI_RLM_AVG 0x0d14 +#define REG_ERR_ISR 0x0e00 /* + "enum qm_isr_reg" */ +#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10)) +#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10)) +#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10)) +#define REG_CEETM_CFG_IDX 0x900 +#define REG_CEETM_CFG_PRES 0x904 + +/* Assists for QMAN_MCR */ +#define MCR_INIT_PFDR 0x01000000 +#define MCR_get_rslt(v) (u8)((v) >> 24) +#define 
MCR_rslt_idle(r) (!rslt || (rslt >= 0xf0)) +#define MCR_rslt_ok(r) (rslt == 0xf0) +#define MCR_rslt_eaccess(r) (rslt == 0xf8) +#define MCR_rslt_inval(r) (rslt == 0xff) + +struct qman; + +/* Follows WQ_CS_CFG0-5 */ +enum qm_wq_class { + qm_wq_portal = 0, + qm_wq_pool = 1, + qm_wq_fman0 = 2, + qm_wq_fman1 = 3, + qm_wq_caam = 4, + qm_wq_pme = 5, + qm_wq_first = qm_wq_portal, + qm_wq_last = qm_wq_pme +}; + +/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */ +enum qm_memory { + qm_memory_fqd, + qm_memory_pfdr +}; + +/* Used by all error interrupt registers except 'inhibit' */ +#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */ +#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */ +#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */ +#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */ +#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */ +#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */ +#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */ +#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */ +#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */ +#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */ +#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */ +#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */ +#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */ +#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */ +#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */ +#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */ +#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */ +#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */ + +/* QMAN_ECIR valid error bit */ +#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \ + QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \ + QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI) +#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \ + QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \ + QM_EIRQ_IFSI) + +union qman_ecir { + u32 ecir_raw; + struct { + u32 __reserved:2; + u32 portal_type:1; + u32 portal_num:5; + u32 fqid:24; + } __packed info; +}; + +union qman_ecir2 { + u32 ecir2_raw; + struct { + u32 portal_type:1; + u32 __reserved:21; + u32 portal_num:10; + } __packed info; +}; + +union qman_eadr { + u32 eadr_raw; + struct { + u32 __reserved1:4; + u32 memid:4; + u32 __reserved2:12; + u32 eadr:12; + } __packed info; + struct { + u32 __reserved1:3; + u32 memid:5; + u32 __reserved:8; + u32 eadr:16; + } __packed info_rev3; +}; + +struct qman_hwerr_txt { + u32 mask; + const char *txt; +}; + +#define QMAN_HWE_TXT(a, b) { .mask = QM_EIRQ_##a, .txt = b } + +static const struct qman_hwerr_txt qman_hwerr_txts[] = { + QMAN_HWE_TXT(CIDE, "Corenet Initiator Data Error"), + QMAN_HWE_TXT(CTDE, "Corenet Target Data Error"), + QMAN_HWE_TXT(CITT, "Corenet Invalid Target Transaction"), + QMAN_HWE_TXT(PLWI, "PFDR Low Watermark"), + QMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"), + QMAN_HWE_TXT(SBEI, "Single-bit ECC Error"), + QMAN_HWE_TXT(PEBI, "PFDR Enqueues Blocked Interrupt"), + QMAN_HWE_TXT(ICVI, "Invalid Command Verb"), + QMAN_HWE_TXT(IFSI, "Invalid Flow Control State"), + QMAN_HWE_TXT(IDDI, "Invalid Dequeue (Direct-connect)"), + QMAN_HWE_TXT(IDFI, "Invalid Dequeue FQ"), + QMAN_HWE_TXT(IDSI, "Invalid Dequeue Source"), + QMAN_HWE_TXT(IDQI, "Invalid Dequeue Queue"), + QMAN_HWE_TXT(IECE, "Invalid Enqueue Configuration"), + QMAN_HWE_TXT(IEOI, "Invalid Enqueue Overflow"), + 
QMAN_HWE_TXT(IESI, "Invalid Enqueue State"), + QMAN_HWE_TXT(IECI, "Invalid Enqueue Channel"), + QMAN_HWE_TXT(IEQI, "Invalid Enqueue Queue") +}; +#define QMAN_HWE_COUNT (sizeof(qman_hwerr_txts)/sizeof(struct qman_hwerr_txt)) + +struct qman_error_info_mdata { + u16 addr_mask; + u16 bits; + const char *txt; +}; + +#define QMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c} +static const struct qman_error_info_mdata error_mdata[] = { + QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 0"), + QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 1"), + QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 2"), + QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 3"), + QMAN_ERR_MDATA(0x0FFF, 512, "FQD cache memory"), + QMAN_ERR_MDATA(0x07FF, 128, "SFDR memory"), + QMAN_ERR_MDATA(0x01FF, 72, "WQ context memory"), + QMAN_ERR_MDATA(0x00FF, 240, "CGR memory"), + QMAN_ERR_MDATA(0x00FF, 302, "Internal Order Restoration List memory"), + QMAN_ERR_MDATA(0x01FF, 256, "SW portal ring memory"), + QMAN_ERR_MDATA(0x07FF, 181, "CEETM class queue descriptor memory"), + QMAN_ERR_MDATA(0x0FFF, 140, "CEETM extended SFDR memory"), + QMAN_ERR_MDATA(0x0FFF, 25, "CEETM logical FQ mapping memory"), + QMAN_ERR_MDATA(0x0FFF, 96, "CEETM dequeue context memory"), + QMAN_ERR_MDATA(0x07FF, 396, "CEETM ccgr memory"), + QMAN_ERR_MDATA(0x00FF, 146, "CEETM CQ channel shaping memory"), + QMAN_ERR_MDATA(0x007F, 256, "CEETM CQ channel scheduling memory"), + QMAN_ERR_MDATA(0x01FF, 88, "CEETM dequeue statistics memory"), +}; +#define QMAN_ERR_MDATA_COUNT \ + (sizeof(error_mdata)/sizeof(struct qman_error_info_mdata)) + +/* Add this in Kconfig */ +#define QMAN_ERRS_TO_UNENABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI) + +/** + * qm_err_isr__ - Manipulate global interrupt registers + * @v: for accessors that write values, this is the 32-bit value + * + * Manipulates QMAN_ERR_ISR, QMAN_ERR_IER, QMAN_ERR_ISDR, QMAN_ERR_IIR. All + * manipulations except qm_err_isr_[un]inhibit() use 32-bit masks composed of + * the QM_EIRQ_*** definitions. Note that "qm_err_isr_enable_write" means + * "write the enable register" rather than "enable the write register"! + */ +#define qm_err_isr_status_read(qm) __qm_err_isr_read(qm, qm_isr_status) +#define qm_err_isr_status_clear(qm, m) __qm_err_isr_write(qm, qm_isr_status,m) +#define qm_err_isr_enable_read(qm) __qm_err_isr_read(qm, qm_isr_enable) +#define qm_err_isr_enable_write(qm, v) __qm_err_isr_write(qm, qm_isr_enable,v) +#define qm_err_isr_disable_read(qm) __qm_err_isr_read(qm, qm_isr_disable) +#define qm_err_isr_disable_write(qm, v) __qm_err_isr_write(qm, qm_isr_disable,v) +#define qm_err_isr_inhibit(qm) __qm_err_isr_write(qm, qm_isr_inhibit,1) +#define qm_err_isr_uninhibit(qm) __qm_err_isr_write(qm, qm_isr_inhibit,0) + +/* + * TODO: unimplemented registers + * + * Keeping a list here of Qman registers I have not yet covered; + * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR, + * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG, + * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12 + */ + +/* Encapsulate "struct qman *" as a cast of the register space address. 
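+ *
+ * The qm_in()/qm_out() wrappers below paste the register name onto the REG_
+ * prefix, so, for example:
+ *
+ *	qm_in(MCR)	-> __qm_in(qm, REG_MCR)
+ *	qm_out(MCR, v)	-> __qm_out(qm, REG_MCR, v)
+ *
+ * keeping call sites short while the offsets stay in the table above.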
*/ + +static struct qman *qm_create(void *regs) +{ + return (struct qman *)regs; +} + +static inline u32 __qm_in(struct qman *qm, u32 offset) +{ + return in_be32((void *)qm + offset); +} +static inline void __qm_out(struct qman *qm, u32 offset, u32 val) +{ + out_be32((void *)qm + offset, val); +} +#define qm_in(reg) __qm_in(qm, REG_##reg) +#define qm_out(reg, val) __qm_out(qm, REG_##reg, val) + +static u32 __qm_err_isr_read(struct qman *qm, enum qm_isr_reg n) +{ + return __qm_in(qm, REG_ERR_ISR + (n << 2)); +} + +static void __qm_err_isr_write(struct qman *qm, enum qm_isr_reg n, u32 val) +{ + __qm_out(qm, REG_ERR_ISR + (n << 2), val); +} + +#if 0 + +static void qm_set_portal(struct qman *qm, u8 swportalID, + u16 ec_tp_cfg, u16 ecd_tp_cfg) +{ + qm_out(QCSP_DD_CFG(swportalID), + ((ec_tp_cfg & 0x1ff) << 16) | (ecd_tp_cfg & 0x1ff)); +} + +static void qm_set_ddebug(struct qman *qm, u8 mdd, u8 m_cfg) +{ + qm_out(DD_CFG, ((mdd & 0x3) << 4) | (m_cfg & 0xf)); +} + +static void qm_set_dc_ddebug(struct qman *qm, enum qm_dc_portal portal, u16 ecd_tp_cfg) +{ + qm_out(DCP_DD_CFG(portal), ecd_tp_cfg & 0x1ff); +} + +static u32 qm_get_pfdr_free_pool_count(struct qman *qm) +{ + return qm_in(PFDR_FPC); +} + +static void qm_get_pfdr_free_pool(struct qman *qm, u32 *head, u32 *tail) +{ + *head = qm_in(PFDR_FP_HEAD); + *tail = qm_in(PFDR_FP_TAIL); +} + +static void qm_set_default_wq(struct qman *qm, u16 wqid) +{ + qm_out(WQ_DEF_ENC_WQID, wqid); +} + +static void qm_set_channel_ddebug(struct qman *qm, u16 channel, u16 tp_cfg) +{ + u32 offset; + int upperhalf = 0; + if ((channel >= QM_CHANNEL_SWPORTAL0) && + (channel <= qm_channel_swportal9)) { + offset = (channel - QM_CHANNEL_SWPORTAL0); + upperhalf = offset & 0x1; + offset = REG_WQ_SC_DD_CFG(offset / 2); + } else if ((channel >= qm_channel_pool1) && + (channel <= qm_channel_pool15)) { + offset = (channel + 1 - qm_channel_pool1); + upperhalf = offset & 0x1; + offset = REG_WQ_PC_DD_CFG(offset / 2); + } else if ((channel >= qm_channel_fman0_sp0) && + (channel <= qm_channel_fman0_sp11)) { + offset = (channel - qm_channel_fman0_sp0); + upperhalf = offset & 0x1; + offset = REG_WQ_DC0_DD_CFG(offset / 2); + } + else if ((channel >= qm_channel_fman1_sp0) && + (channel <= qm_channel_fman1_sp11)) { + offset = (channel - qm_channel_fman1_sp0); + upperhalf = offset & 0x1; + offset = REG_WQ_DC1_DD_CFG(offset / 2); + } + else if (channel == qm_channel_caam) + offset = REG_WQ_DCn_DD_CFG(2); + else if (channel == qm_channel_pme) + offset = REG_WQ_DCn_DD_CFG(3); + else { + pr_crit("Illegal qm_channel type %d\n", channel); + return; + } + __qm_out(qm, offset, upperhalf ? 
((u32)tp_cfg << 16) : tp_cfg); +} + +static void qm_get_details(struct qman *qm, u8 *int_options, u8 *errata, + u8 *conf_options) +{ + u32 v = qm_in(IP_REV_1); + *int_options = (v >> 16) & 0xff; + *errata = (v >> 8) & 0xff; + *conf_options = v & 0xff; +} + +static void qm_set_corenet_bar(struct qman *qm, u16 eba, u32 ba) +{ + /* choke if 'ba' isn't properly aligned */ + DPA_ASSERT(!(ba & 0x001fffff)); + qm_out(QCSP_BARE, eba); + qm_out(QCSP_BAR, ba); +} + +static u8 qm_get_corenet_sourceid(struct qman *qm) +{ + return qm_in(SRCIDR); +} + +static u16 qm_get_liodn(struct qman *qm) +{ + return qm_in(LIODNR); +} + +static void qm_set_congestion_config(struct qman *qm, u16 pres) +{ + qm_out(CM_CFG, pres); +} + +#endif + +static void qm_set_dc(struct qman *qm, enum qm_dc_portal portal, + int ed, u8 sernd) +{ + DPA_ASSERT(!ed || (portal == qm_dc_portal_fman0) || + (portal == qm_dc_portal_fman1)); + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) + qm_out(DCP_CFG(portal), (ed ? 0x1000 : 0) | (sernd & 0x3ff)); + else + qm_out(DCP_CFG(portal), (ed ? 0x100 : 0) | (sernd & 0x1f)); +} + +static void qm_set_wq_scheduling(struct qman *qm, enum qm_wq_class wq_class, + u8 cs_elev, u8 csw2, u8 csw3, u8 csw4, u8 csw5, + u8 csw6, u8 csw7) +{ +#ifdef CONFIG_FSL_QMAN_BUG_AND_FEATURE_REV1 +#define csw(x) \ +do { \ + if (++x == 8) \ + x = 7; \ +} while (0) + if (qman_ip_rev == QMAN_REV10) { + csw(csw2); + csw(csw3); + csw(csw4); + csw(csw5); + csw(csw6); + csw(csw7); + } +#endif + qm_out(WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) | + ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) | + ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) | + ((csw6 & 0x7) << 4) | (csw7 & 0x7)); +} + +static void qm_set_hid(struct qman *qm) +{ +#ifdef CONFIG_FSL_QMAN_BUG_AND_FEATURE_REV1 + if (qman_ip_rev == QMAN_REV10) + qm_out(HID_CFG, 3); + else +#endif + qm_out(HID_CFG, 0); +} + +static void qm_set_corenet_initiator(struct qman *qm) +{ + qm_out(CI_SCHED_CFG, + 0x80000000 | /* write srcciv enable */ + (CONFIG_FSL_QMAN_CI_SCHED_CFG_SRCCIV << 24) | + (CONFIG_FSL_QMAN_CI_SCHED_CFG_SRQ_W << 8) | + (CONFIG_FSL_QMAN_CI_SCHED_CFG_RW_W << 4) | + CONFIG_FSL_QMAN_CI_SCHED_CFG_BMAN_W); +} + +static void qm_get_version(struct qman *qm, u16 *id, u8 *major, u8 *minor) +{ + u32 v = qm_in(IP_REV_1); + *id = (v >> 16); + *major = (v >> 8) & 0xff; + *minor = v & 0xff; +} + +static void qm_set_memory(struct qman *qm, enum qm_memory memory, u64 ba, + int enable, int prio, int stash, u32 size) +{ + u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE; + u32 exp = ilog2(size); + /* choke if size isn't within range */ + DPA_ASSERT((size >= 4096) && (size <= 1073741824) && + is_power_of_2(size)); + /* choke if 'ba' has lower-alignment than 'size' */ + DPA_ASSERT(!(ba & (size - 1))); + __qm_out(qm, offset, upper_32_bits(ba)); + __qm_out(qm, offset + REG_offset_BAR, lower_32_bits(ba)); + __qm_out(qm, offset + REG_offset_AR, + (enable ? 0x80000000 : 0) | + (prio ? 0x40000000 : 0) | + (stash ? 
0x20000000 : 0) | + (exp - 1)); +} + +static void qm_set_pfdr_threshold(struct qman *qm, u32 th, u8 k) +{ + qm_out(PFDR_FP_LWIT, th & 0xffffff); + qm_out(PFDR_CFG, k); +} + +static void qm_set_sfdr_threshold(struct qman *qm, u16 th) +{ + qm_out(SFDR_CFG, th & 0x3ff); +} + +static int qm_init_pfdr(struct qman *qm, u32 pfdr_start, u32 num) +{ + u8 rslt = MCR_get_rslt(qm_in(MCR)); + + DPA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num); + /* Make sure the command interface is 'idle' */ + if(!MCR_rslt_idle(rslt)) + panic("QMAN_MCR isn't idle"); + + /* Write the MCR command params then the verb */ + qm_out(MCP(0), pfdr_start ); + /* TODO: remove this - it's a workaround for a model bug that is + * corrected in more recent versions. We use the workaround until + * everyone has upgraded. */ + qm_out(MCP(1), (pfdr_start + num - 16)); + lwsync(); + qm_out(MCR, MCR_INIT_PFDR); + /* Poll for the result */ + do { + rslt = MCR_get_rslt(qm_in(MCR)); + } while(!MCR_rslt_idle(rslt)); + if (MCR_rslt_ok(rslt)) + return 0; + if (MCR_rslt_eaccess(rslt)) + return -EACCES; + if (MCR_rslt_inval(rslt)) + return -EINVAL; + pr_crit("Unexpected result from MCR_INIT_PFDR: %02x\n", rslt); + return -ENOSYS; +} + +/*****************/ +/* Config driver */ +/*****************/ + +#define DEFAULT_FQD_SZ (PAGE_SIZE << CONFIG_FSL_QMAN_FQD_SZ) +#define DEFAULT_PFDR_SZ (PAGE_SIZE << CONFIG_FSL_QMAN_PFDR_SZ) + +/* We support only one of these */ +static struct qman *qm; +static struct device_node *qm_node; + +/* And this state belongs to 'qm'. It is set during fsl_qman_init(), but used + * during qman_init_ccsr(). */ +static dma_addr_t fqd_a, pfdr_a; +static size_t fqd_sz = DEFAULT_FQD_SZ, pfdr_sz = DEFAULT_PFDR_SZ; + +/* Parse the property to extract the memory location and size and + * memblock_reserve() it. If it isn't supplied, memblock_alloc() the default + * size. Also flush this memory range from data cache so that QMAN originated + * transactions for this memory region could be marked non-coherent. + */ +static __init int parse_mem_property(struct device_node *node, const char *name, + dma_addr_t *addr, size_t *sz, int zero) +{ + const u32 *pint; + int ret; + unsigned long vaddr; + + pint = of_get_property(node, name, &ret); + if (!pint || (ret != 16)) { + pr_info("No %s property '%s', using memblock_alloc(%016zx)\n", + node->full_name, name, *sz); + *addr = memblock_alloc(*sz, *sz); + vaddr = (unsigned long)phys_to_virt(*addr); + if (zero) + memset((void *)vaddr, 0, *sz); + flush_dcache_range(vaddr, vaddr + *sz); + return 0; + } + pr_info("Using %s property '%s'\n", node->full_name, name); + /* If using a "zero-pma", don't try to zero it, even if you asked */ + if (zero && of_find_property(node, "zero-pma", &ret)) { + pr_info(" it's a 'zero-pma', not zeroing from s/w\n"); + zero = 0; + } + *addr = ((u64)pint[0] << 32) | (u64)pint[1]; + *sz = ((u64)pint[2] << 32) | (u64)pint[3]; + /* Keep things simple, it's either all in the DRAM range or it's all + * outside. 
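+ *
+ * For illustration, the "fsl,qman-fqd"/"fsl,qman-pfdr" properties are read
+ * as four 32-bit cells: a 64-bit physical address followed by a 64-bit size.
+ * Hypothetical example values (not from a real device tree):
+ *
+ *	fsl,qman-fqd = <0x0 0x20000000 0x0 0x00400000>;
+ *
+ * i.e. a 4MB FQD region at physical 0x20000000, naturally aligned to its
+ * size as qm_set_memory() asserts.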
*/ + if (*addr < memblock_end_of_DRAM()) { + BUG_ON((u64)*addr + (u64)*sz > memblock_end_of_DRAM()); + if (memblock_reserve(*addr, *sz) < 0) { + pr_err("Failed to reserve %s\n", name); + return -ENOMEM; + } + vaddr = (unsigned long)phys_to_virt(*addr); + if (zero) + memset(phys_to_virt(*addr), 0, *sz); + flush_dcache_range(vaddr, vaddr + *sz); + } else if (zero) { + /* map as cacheable, non-guarded */ + void *tmpp = ioremap_prot(*addr, *sz, 0); + memset(tmpp, 0, *sz); + vaddr = (unsigned long)tmpp; + flush_dcache_range(vaddr, vaddr + *sz); + iounmap(tmpp); + } + return 0; +} + +/* TODO: + * - there is obviously no handling of errors, + * - the calls to qm_set_memory() hard-code the priority and CPC-stashing for + * both memory resources to zero. + */ +static int __init fsl_qman_init(struct device_node *node) +{ + struct resource res; + u32 __iomem *regs; + const char *s; + int ret, standby = 0; + u16 id; + u8 major, minor; + ret = of_address_to_resource(node, 0, &res); + if (ret) { + pr_err("Can't get %s property '%s'\n", node->full_name, "reg"); + return ret; + } + s = of_get_property(node, "fsl,hv-claimable", &ret); + if (s && !strcmp(s, "standby")) + standby = 1; + if (!standby) { + ret = parse_mem_property(node, "fsl,qman-fqd", + &fqd_a, &fqd_sz, 1); + BUG_ON(ret); + ret = parse_mem_property(node, "fsl,qman-pfdr", + &pfdr_a, &pfdr_sz, 0); + BUG_ON(ret); + } + /* Global configuration */ + regs = ioremap(res.start, res.end - res.start + 1); + qm = qm_create(regs); + qm_node = node; + qm_get_version(qm, &id, &major, &minor); + pr_info("Qman ver:%04x,%02x,%02x\n", id, major, minor); + if (!qman_ip_rev) { + if ((major == 1) && (minor == 0)) + qman_ip_rev = QMAN_REV10; + else if ((major == 1) && (minor == 1)) + qman_ip_rev = QMAN_REV11; + else if ((major == 1) && (minor == 2)) + qman_ip_rev = QMAN_REV12; + else if ((major == 2) && (minor == 0)) + qman_ip_rev = QMAN_REV20; + else if ((major == 3) && (minor == 0)) + qman_ip_rev = QMAN_REV30; + else { + pr_warning("unknown Qman version, default to rev1.1\n"); + qman_ip_rev = QMAN_REV11; + } + } + + if (standby) { + pr_info(" -> in standby mode\n"); + return 0; + } + return 0; +} + +int qman_have_ccsr(void) +{ + return qm ? 1 : 0; +} + +__init void qman_init_early(void) +{ + struct device_node *dn; + int ret; + + for_each_compatible_node(dn, NULL, "fsl,qman") { + if (qm) + pr_err("%s: only one 'fsl,qman' allowed\n", + dn->full_name); + else { + if (!of_device_is_available(dn)) + continue; + + ret = fsl_qman_init(dn); + BUG_ON(ret); + } + } +} + +static void log_edata_bits(u32 bit_count) +{ + u32 i, j, mask = 0xffffffff; + + pr_warning("Qman ErrInt, EDATA:\n"); + i = bit_count/32; + if (bit_count%32) { + i++; + mask = ~(mask << bit_count%32); + } + j = 16-i; + pr_warning(" 0x%08x\n", qm_in(EDATA(j)) & mask); + j++; + for (; j < 16; j++) + pr_warning(" 0x%08x\n", qm_in(EDATA(j))); +} + +static void log_additional_error_info(u32 isr_val, u32 ecsr_val) +{ + union qman_ecir ecir_val; + union qman_eadr eadr_val; + + ecir_val.ecir_raw = qm_in(ECIR); + /* Is portal info valid */ + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) { + union qman_ecir2 ecir2_val; + ecir2_val.ecir2_raw = qm_in(ECIR2); + if (ecsr_val & PORTAL_ECSR_ERR) { + pr_warning("Qman ErrInt: %s id %d\n", + (ecir2_val.info.portal_type) ? 
+ "DCP" : "SWP", ecir2_val.info.portal_num); + } + if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE)) { + pr_warning("Qman ErrInt: ecir.fqid 0x%x\n", + ecir_val.info.fqid); + } + if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) { + eadr_val.eadr_raw = qm_in(EADR); + pr_warning("Qman ErrInt: EADR Memory: %s, 0x%x\n", + error_mdata[eadr_val.info_rev3.memid].txt, + error_mdata[eadr_val.info_rev3.memid].addr_mask + & eadr_val.info_rev3.eadr); + log_edata_bits( + error_mdata[eadr_val.info_rev3.memid].bits); + } + } else { + if (ecsr_val & PORTAL_ECSR_ERR) { + pr_warning("Qman ErrInt: %s id %d\n", + (ecir_val.info.portal_type) ? + "DCP" : "SWP", ecir_val.info.portal_num); + } + if (ecsr_val & FQID_ECSR_ERR) { + pr_warning("Qman ErrInt: ecir.fqid 0x%x\n", + ecir_val.info.fqid); + } + if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) { + eadr_val.eadr_raw = qm_in(EADR); + pr_warning("Qman ErrInt: EADR Memory: %s, 0x%x\n", + error_mdata[eadr_val.info.memid].txt, + error_mdata[eadr_val.info.memid].addr_mask + & eadr_val.info.eadr); + log_edata_bits(error_mdata[eadr_val.info.memid].bits); + } + } +} + +/* Qman interrupt handler */ +static irqreturn_t qman_isr(int irq, void *ptr) +{ + u32 isr_val, ier_val, ecsr_val, isr_mask, i; + + ier_val = qm_err_isr_enable_read(qm); + isr_val = qm_err_isr_status_read(qm); + ecsr_val = qm_in(ECSR); + isr_mask = isr_val & ier_val; + + if (!isr_mask) + return IRQ_NONE; + for (i = 0; i < QMAN_HWE_COUNT; i++) { + if (qman_hwerr_txts[i].mask & isr_mask) { + pr_warning("Qman ErrInt: %s\n", qman_hwerr_txts[i].txt); + if (qman_hwerr_txts[i].mask & ecsr_val) { + log_additional_error_info(isr_mask, ecsr_val); + /* Re-arm error capture registers */ + qm_out(ECSR, ecsr_val); + } + if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_UNENABLE) { + pr_devel("Qman un-enabling error 0x%x\n", + qman_hwerr_txts[i].mask); + ier_val &= ~qman_hwerr_txts[i].mask; + qm_err_isr_enable_write(qm, ier_val); + } + } + } + qm_err_isr_status_clear(qm, isr_val); + return IRQ_HANDLED; +} + +static int __bind_irq(void) +{ + int ret, err_irq; + + err_irq = of_irq_to_resource(qm_node, 0, NULL); + if (err_irq == NO_IRQ) { + pr_info("Can't get %s property '%s'\n", qm_node->full_name, + "interrupts"); + return -ENODEV; + } + ret = request_irq(err_irq, qman_isr, IRQF_SHARED, "qman-err", qm_node); + if (ret) { + pr_err("request_irq() failed %d for '%s'\n", ret, + qm_node->full_name); + return -ENODEV; + } + /* Write-to-clear any stale bits, (eg. starvation being asserted prior + * to resource allocation during driver init). 
*/ + qm_err_isr_status_clear(qm, 0xffffffff); + /* Enable Error Interrupts */ + qm_err_isr_enable_write(qm, 0xffffffff); + return 0; +} + +int qman_init_ccsr(struct device_node *node) +{ + int ret; + if (!qman_have_ccsr()) + return 0; + if (node != qm_node) + return -EINVAL; + /* FQD memory */ + qm_set_memory(qm, qm_memory_fqd, fqd_a, 1, 0, 0, fqd_sz); + /* PFDR memory */ + qm_set_memory(qm, qm_memory_pfdr, pfdr_a, 1, 0, 0, pfdr_sz); + qm_init_pfdr(qm, 8, pfdr_sz / 64 - 8); + /* thresholds */ + qm_set_pfdr_threshold(qm, 512, 64); + qm_set_sfdr_threshold(qm, 128); + /* clear stale PEBI bit from interrupt status register */ + qm_err_isr_status_clear(qm, QM_EIRQ_PEBI); + /* corenet initiator settings */ + qm_set_corenet_initiator(qm); + /* HID settings */ + qm_set_hid(qm); + /* Set scheduling weights to defaults */ + for (ret = qm_wq_first; ret <= qm_wq_last; ret++) + qm_set_wq_scheduling(qm, ret, 0, 0, 0, 0, 0, 0, 0); + /* We are not prepared to accept ERNs for hardware enqueues */ + qm_set_dc(qm, qm_dc_portal_fman0, 1, 0); + qm_set_dc(qm, qm_dc_portal_fman1, 1, 0); + /* Initialise Error Interrupt Handler */ + ret = __bind_irq(); + if (ret) + return ret; + return 0; +} + +#define LIO_CFG_LIODN_MASK 0x0fff0000 +void qman_liodn_fixup(u16 channel) +{ + static int done; + static u32 liodn_offset; + u32 before, after; + int idx = channel - QM_CHANNEL_SWPORTAL0; + + if (!qman_have_ccsr()) + return; + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) + before = qm_in(REV3_QCSP_LIO_CFG(idx)); + else + before = qm_in(QCSP_LIO_CFG(idx)); + if (!done) { + liodn_offset = before & LIO_CFG_LIODN_MASK; + done = 1; + return; + } + after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset; + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) + qm_out(REV3_QCSP_LIO_CFG(idx), after); + else + qm_out(QCSP_LIO_CFG(idx), after); +} + +#define IO_CFG_SDEST_MASK 0x00ff0000 +int qman_set_sdest(u16 channel, unsigned int cpu_idx) +{ + int idx = channel - QM_CHANNEL_SWPORTAL0; + u32 before, after; + + if (!qman_have_ccsr()) + return -ENODEV; + + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) { + before = qm_in(REV3_QCSP_IO_CFG(idx)); + /* Each pair of vcpu share the same SRQ(SDEST) */ + cpu_idx /= 2; + after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16); + qm_out(REV3_QCSP_IO_CFG(idx), after); + } else { + before = qm_in(QCSP_IO_CFG(idx)); + after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16); + qm_out(QCSP_IO_CFG(idx), after); + } + return 0; +} + +#define MISC_CFG_WPM_MASK 0x00000002 +int qm_set_wpm(int wpm) +{ + u32 before; + u32 after; + + if (!qman_have_ccsr()) + return -ENODEV; + + before = qm_in(MISC_CFG); + after = (before & (~MISC_CFG_WPM_MASK)) | (wpm << 1); + qm_out(MISC_CFG, after); + return 0; +} + +int qm_get_wpm(int *wpm) +{ + u32 before; + + if (!qman_have_ccsr()) + return -ENODEV; + + before = qm_in(MISC_CFG); + *wpm = (before & MISC_CFG_WPM_MASK) >> 1; + return 0; +} + +/* CEETM_CFG_PRES register has PRES field which is calculated by: + * PRES = (2^22 / credit update reference period) * QMan clock period + * = (2^22 * 10^9)/ CONFIG_QMAN_CEETM_UPDATE_PERIOD) / qman_clk + */ + +int qman_ceetm_set_prescaler(enum qm_dc_portal portal) +{ + u64 temp; + u16 pres; + + if (!qman_have_ccsr()) + return -ENODEV; + + temp = 0x400000 * 100; + temp /= CONFIG_QMAN_CEETM_UPDATE_PERIOD; + temp *= 10000000; + pres = (u16)(temp / qman_clk); + + qm_out(CEETM_CFG_IDX, portal); + qm_out(CEETM_CFG_PRES, pres); + return 0; +} + +int qman_ceetm_get_prescaler(u16 *pres) +{ + if (!qman_have_ccsr()) + return -ENODEV; + *pres = 
(u16)qm_in(CEETM_CFG_PRES); + return 0; +} + +#define DCP_CFG_CEETME_MASK 0xFFFF0000 +#define QM_SP_ENABLE_CEETM(n) (0x80000000 >> (n)) +int qman_sp_enable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal) +{ + u32 dcp_cfg; + + if (!qman_have_ccsr()) + return -ENODEV; + + dcp_cfg = qm_in(DCP_CFG(portal)); + dcp_cfg |= QM_SP_ENABLE_CEETM(sub_portal); + qm_out(DCP_CFG(portal), dcp_cfg); + return 0; +} + +int qman_sp_disable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal) +{ + u32 dcp_cfg; + + if (!qman_have_ccsr()) + return -ENODEV; + dcp_cfg = qm_in(DCP_CFG(portal)); + dcp_cfg &= ~(QM_SP_ENABLE_CEETM(sub_portal)); + qm_out(DCP_CFG(portal), dcp_cfg); + return 0; +} + +#ifdef CONFIG_SYSFS + +#define DRV_NAME "fsl-qman" + +static ssize_t show_pfdr_fpc(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_FPC)); +}; + +static ssize_t show_dlm_avg(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + u32 data; + int i; + + if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i)) + return -EINVAL; + data = qm_in(DCP_DLM_AVG(i)); + return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8, + (data & 0x000000ff)*390625); +}; + +static ssize_t set_dlm_avg(struct device *dev, + struct device_attribute *dev_attr, const char *buf, size_t count) +{ + unsigned long val; + int i; + + if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i)) + return -EINVAL; + if (strict_strtoul(buf, 0, &val)) { + dev_dbg(dev, "invalid input %s\n", buf); + return -EINVAL; + } + qm_out(DCP_DLM_AVG(i), val); + return count; +}; + +static ssize_t show_pfdr_cfg(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_CFG)); +}; + +static ssize_t set_pfdr_cfg(struct device *dev, + struct device_attribute *dev_attr, const char *buf, size_t count) +{ + unsigned long val; + + if (strict_strtoul(buf, 0, &val)) { + dev_dbg(dev, "invalid input %s\n", buf); + return -EINVAL; + } + qm_out(PFDR_CFG, val); + return count; +}; + +static ssize_t show_sfdr_in_use(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SFDR_IN_USE)); +}; + +static ssize_t show_idle_stat(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(IDLE_STAT)); +}; + +static ssize_t show_ci_rlm_avg(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + u32 data = qm_in(CI_RLM_AVG); + return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8, + (data & 0x000000ff)*390625); +}; + +static ssize_t set_ci_rlm_avg(struct device *dev, + struct device_attribute *dev_attr, const char *buf, size_t count) +{ + unsigned long val; + + if (strict_strtoul(buf, 0, &val)) { + dev_dbg(dev, "invalid input %s\n", buf); + return -EINVAL; + } + qm_out(CI_RLM_AVG, val); + return count; +}; + +static ssize_t show_err_isr(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "0x%08x\n", qm_in(ERR_ISR)); +}; + + +static ssize_t show_sbec(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + int i; + + if (!sscanf(dev_attr->attr.name, "sbec_%d", &i)) + return -EINVAL; + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SBEC(i))); +}; + +static DEVICE_ATTR(pfdr_fpc, S_IRUSR, show_pfdr_fpc, NULL); +static DEVICE_ATTR(pfdr_cfg, S_IRUSR, show_pfdr_cfg, set_pfdr_cfg); +static DEVICE_ATTR(idle_stat, S_IRUSR, show_idle_stat, NULL); +static 
DEVICE_ATTR(ci_rlm_avg, (S_IRUSR|S_IWUGO), + show_ci_rlm_avg, set_ci_rlm_avg); +static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL); +static DEVICE_ATTR(sfdr_in_use, S_IRUSR, show_sfdr_in_use, NULL); + +static DEVICE_ATTR(dcp0_dlm_avg, (S_IRUSR|S_IWUGO), show_dlm_avg, set_dlm_avg); +static DEVICE_ATTR(dcp1_dlm_avg, (S_IRUSR|S_IWUGO), show_dlm_avg, set_dlm_avg); +static DEVICE_ATTR(dcp2_dlm_avg, (S_IRUSR|S_IWUGO), show_dlm_avg, set_dlm_avg); +static DEVICE_ATTR(dcp3_dlm_avg, (S_IRUSR|S_IWUGO), show_dlm_avg, set_dlm_avg); + +static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_2, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_3, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_4, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_5, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_6, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_7, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_8, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_9, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_10, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_11, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_12, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_13, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_14, S_IRUSR, show_sbec, NULL); + +static struct attribute *qman_dev_attributes[] = { + &dev_attr_pfdr_fpc.attr, + &dev_attr_pfdr_cfg.attr, + &dev_attr_idle_stat.attr, + &dev_attr_ci_rlm_avg.attr, + &dev_attr_err_isr.attr, + &dev_attr_dcp0_dlm_avg.attr, + &dev_attr_dcp1_dlm_avg.attr, + &dev_attr_dcp2_dlm_avg.attr, + &dev_attr_dcp3_dlm_avg.attr, + /* sfdr_in_use will be added if necessary */ + NULL +}; + +static struct attribute *qman_dev_ecr_attributes[] = { + &dev_attr_sbec_0.attr, + &dev_attr_sbec_1.attr, + &dev_attr_sbec_2.attr, + &dev_attr_sbec_3.attr, + &dev_attr_sbec_4.attr, + &dev_attr_sbec_5.attr, + &dev_attr_sbec_6.attr, + &dev_attr_sbec_7.attr, + &dev_attr_sbec_8.attr, + &dev_attr_sbec_9.attr, + &dev_attr_sbec_10.attr, + &dev_attr_sbec_11.attr, + &dev_attr_sbec_12.attr, + &dev_attr_sbec_13.attr, + &dev_attr_sbec_14.attr, + NULL +}; + +/* root level */ +static const struct attribute_group qman_dev_attr_grp = { + .name = NULL, + .attrs = qman_dev_attributes +}; +static const struct attribute_group qman_dev_ecr_grp = { + .name = "error_capture", + .attrs = qman_dev_ecr_attributes +}; + +static int of_fsl_qman_remove(struct platform_device *ofdev) +{ + sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp); + return 0; +}; + +static int __devinit of_fsl_qman_probe(struct platform_device *ofdev) +{ + int ret; + + ret = sysfs_create_group(&ofdev->dev.kobj, &qman_dev_attr_grp); + if (ret) + goto done; + if (qman_ip_rev != QMAN_REV10) { + ret = sysfs_add_file_to_group(&ofdev->dev.kobj, + &dev_attr_sfdr_in_use.attr, qman_dev_attr_grp.name); + if (ret) + goto del_group_0; + } + ret = sysfs_create_group(&ofdev->dev.kobj, &qman_dev_ecr_grp); + if (ret) + goto del_group_0; + + goto done; + +del_group_0: + sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp); +done: + if (ret) + dev_err(&ofdev->dev, + "Cannot create dev attributes ret=%d\n", ret); + return ret; +}; + +static struct of_device_id of_fsl_qman_ids[] = { + { + .compatible = "fsl,qman", + }, + {} +}; +MODULE_DEVICE_TABLE(of, of_fsl_qman_ids); + +static struct platform_driver of_fsl_qman_driver = { + .driver = { + .owner = THIS_MODULE, + .name = DRV_NAME, + .of_match_table = of_fsl_qman_ids, + }, + .probe = 
of_fsl_qman_probe, + .remove = __devexit_p(of_fsl_qman_remove), +}; + +static int qman_ctrl_init(void) +{ + return platform_driver_register(&of_fsl_qman_driver); +} + +static void qman_ctrl_exit(void) +{ + platform_driver_unregister(&of_fsl_qman_driver); +} + +module_init(qman_ctrl_init); +module_exit(qman_ctrl_exit); + +#endif /* CONFIG_SYSFS */ diff --git a/drivers/staging/fsl_qbman/qman_debugfs.c b/drivers/staging/fsl_qbman/qman_debugfs.c new file mode 100644 index 0000000..8a17550 --- /dev/null +++ b/drivers/staging/fsl_qbman/qman_debugfs.c @@ -0,0 +1,1404 @@ +/* Copyright 2010-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include "qman_private.h" + +#define MAX_FQID (0x00ffffff) +#define QM_FQD_BLOCK_SIZE 64 +#define QM_FQD_AR (0xC10) + +static u32 fqid_max; +static u64 qman_ccsr_start; +static u64 qman_ccsr_size; + +static const char *state_txt[] = { + "Out of Service", + "Retired", + "Tentatively Scheduled", + "Truly Scheduled", + "Parked", + "Active, Active Held or Held Suspended", + "Unknown State 6", + "Unknown State 7", + NULL, +}; + +static const u8 fqd_states[] = { + QM_MCR_NP_STATE_OOS, QM_MCR_NP_STATE_RETIRED, QM_MCR_NP_STATE_TEN_SCHED, + QM_MCR_NP_STATE_TRU_SCHED, QM_MCR_NP_STATE_PARKED, + QM_MCR_NP_STATE_ACTIVE}; +static const u32 fqd_states_count = sizeof(fqd_states)/sizeof(u8); + +struct mask_to_text { + u16 mask; + const char *txt; +}; + +struct mask_filter_s { + u16 mask; + u8 filter; +}; + +static const struct mask_filter_s mask_filter[] = { + {QM_FQCTRL_PREFERINCACHE, 0}, + {QM_FQCTRL_PREFERINCACHE, 1}, + {QM_FQCTRL_HOLDACTIVE, 0}, + {QM_FQCTRL_HOLDACTIVE, 1}, + {QM_FQCTRL_AVOIDBLOCK, 0}, + {QM_FQCTRL_AVOIDBLOCK, 1}, + {QM_FQCTRL_FORCESFDR, 0}, + {QM_FQCTRL_FORCESFDR, 1}, + {QM_FQCTRL_CPCSTASH, 0}, + {QM_FQCTRL_CPCSTASH, 1}, + {QM_FQCTRL_CTXASTASHING, 0}, + {QM_FQCTRL_CTXASTASHING, 1}, + {QM_FQCTRL_ORP, 0}, + {QM_FQCTRL_ORP, 1}, + {QM_FQCTRL_TDE, 0}, + {QM_FQCTRL_TDE, 1}, + {QM_FQCTRL_CGE, 0}, + {QM_FQCTRL_CGE, 1} +}; +static const u32 mask_filter_count = + sizeof(mask_filter)/sizeof(struct mask_filter_s); + +static const struct mask_to_text fq_ctrl_text_list[] = { + { + .mask = QM_FQCTRL_PREFERINCACHE, + .txt = "Prefer in cache", + }, + { + .mask = QM_FQCTRL_HOLDACTIVE, + .txt = "Hold active in portal", + }, + { + .mask = QM_FQCTRL_AVOIDBLOCK, + .txt = "Avoid Blocking", + }, + { + .mask = QM_FQCTRL_FORCESFDR, + .txt = "High-priority SFDRs", + }, + { + .mask = QM_FQCTRL_CPCSTASH, + .txt = "CPC Stash Enable", + }, + { + .mask = QM_FQCTRL_CTXASTASHING, + .txt = "Context-A stashing", + }, + { + .mask = QM_FQCTRL_ORP, + .txt = "ORP Enable", + }, + { + .mask = QM_FQCTRL_TDE, + .txt = "Tail-Drop Enable", + }, + { + .mask = QM_FQCTRL_CGE, + .txt = "Congestion Group Enable", + }, + { + .mask = 0, + .txt = NULL, + } +}; + +static const char *get_fqd_ctrl_text(u16 mask) +{ + int i = 0; + + while (fq_ctrl_text_list[i].txt != NULL) { + if (fq_ctrl_text_list[i].mask == mask) + return fq_ctrl_text_list[i].txt; + i++; + } + return NULL; +} + +static const struct mask_to_text stashing_text_list[] = { + { + .mask = QM_STASHING_EXCL_CTX, + .txt = "FQ Ctx Stash" + }, + { + .mask = QM_STASHING_EXCL_DATA, + .txt = "Frame Data Stash", + }, + { + .mask = QM_STASHING_EXCL_ANNOTATION, + .txt = "Frame Annotation Stash", + }, + { + .mask = 0, + .txt = NULL, + }, +}; + +static int user_input_convert(const char __user *user_buf, size_t count, + unsigned long *val) +{ + char buf[12]; + + if (count > sizeof(buf) - 1) + return -EINVAL; + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + buf[count] = '\0'; + if (strict_strtoul(buf, 0, val)) + return -EINVAL; + return 0; +} + +struct line_buffer_fq { + u32 buf[8]; + u32 buf_cnt; + int line_cnt; +}; + +static void add_to_line_buffer(struct line_buffer_fq *line_buf, u32 fqid, + struct seq_file *file) +{ + line_buf->buf[line_buf->buf_cnt] = fqid; + line_buf->buf_cnt++; + if (line_buf->buf_cnt == 8) { + /* Buffer is full, flush it */ + if (line_buf->line_cnt != 0) + seq_printf(file, ",\n"); + seq_printf(file, "0x%06x,0x%06x,0x%06x,0x%06x,0x%06x," + "0x%06x,0x%06x,0x%06x", + line_buf->buf[0], line_buf->buf[1], line_buf->buf[2], + line_buf->buf[3], 
line_buf->buf[4], line_buf->buf[5], + line_buf->buf[6], line_buf->buf[7]); + line_buf->buf_cnt = 0; + line_buf->line_cnt++; + } +} + +static void flush_line_buffer(struct line_buffer_fq *line_buf, + struct seq_file *file) +{ + if (line_buf->buf_cnt) { + int y = 0; + if (line_buf->line_cnt != 0) + seq_printf(file, ",\n"); + while (y != line_buf->buf_cnt) { + if (y+1 == line_buf->buf_cnt) + seq_printf(file, "0x%06x", line_buf->buf[y]); + else + seq_printf(file, "0x%06x,", line_buf->buf[y]); + y++; + } + line_buf->line_cnt++; + } + if (line_buf->line_cnt) + seq_printf(file, "\n"); +} + +static struct dentry *dfs_root; /* debugfs root directory */ + +/******************************************************************************* + * Query Frame Queue Non Programmable Fields + ******************************************************************************/ +struct query_fq_np_fields_data_s { + u32 fqid; +}; +static struct query_fq_np_fields_data_s query_fq_np_fields_data = { + .fqid = 1, +}; + +static int query_fq_np_fields_show(struct seq_file *file, void *offset) +{ + int ret; + struct qm_mcr_queryfq_np np; + struct qman_fq fq; + + fq.fqid = query_fq_np_fields_data.fqid; + ret = qman_query_fq_np(&fq, &np); + if (ret) + return ret; + /* Print state */ + seq_printf(file, "Query FQ Non Programmable Fields Result fqid 0x%x\n", + fq.fqid); + seq_printf(file, " force eligible pending: %s\n", + (np.state & QM_MCR_NP_STATE_FE) ? "yes" : "no"); + seq_printf(file, " retirement pending: %s\n", + (np.state & QM_MCR_NP_STATE_R) ? "yes" : "no"); + seq_printf(file, " state: %s\n", + state_txt[np.state & QM_MCR_NP_STATE_MASK]); + seq_printf(file, " fq_link: 0x%x\n", np.fqd_link); + seq_printf(file, " odp_seq: %u\n", np.odp_seq); + seq_printf(file, " orp_nesn: %u\n", np.orp_nesn); + seq_printf(file, " orp_ea_hseq: %u\n", np.orp_ea_hseq); + seq_printf(file, " orp_ea_tseq: %u\n", np.orp_ea_tseq); + seq_printf(file, " orp_ea_hptr: 0x%x\n", np.orp_ea_hptr); + seq_printf(file, " orp_ea_tptr: 0x%x\n", np.orp_ea_tptr); + seq_printf(file, " pfdr_hptr: 0x%x\n", np.pfdr_hptr); + seq_printf(file, " pfdr_tptr: 0x%x\n", np.pfdr_tptr); + seq_printf(file, " is: ics_surp contains a %s\n", + (np.is) ? 
"deficit" : "surplus"); + seq_printf(file, " ics_surp: %u\n", np.ics_surp); + seq_printf(file, " byte_cnt: %u\n", np.byte_cnt); + seq_printf(file, " frm_cnt: %u\n", np.frm_cnt); + seq_printf(file, " ra1_sfdr: 0x%x\n", np.ra1_sfdr); + seq_printf(file, " ra2_sfdr: 0x%x\n", np.ra2_sfdr); + seq_printf(file, " od1_sfdr: 0x%x\n", np.od1_sfdr); + seq_printf(file, " od2_sfdr: 0x%x\n", np.od2_sfdr); + seq_printf(file, " od3_sfdr: 0x%x\n", np.od3_sfdr); + return 0; +} + +static int query_fq_np_fields_open(struct inode *inode, + struct file *file) +{ + return single_open(file, query_fq_np_fields_show, NULL); +} + +static ssize_t query_fq_np_fields_write(struct file *f, + const char __user *buf, size_t count, loff_t *off) +{ + int ret; + unsigned long val; + + ret = user_input_convert(buf, count, &val); + if (ret) + return ret; + if (val > MAX_FQID) + return -EINVAL; + query_fq_np_fields_data.fqid = (u32)val; + return count; +} + +static const struct file_operations query_fq_np_fields_fops = { + .owner = THIS_MODULE, + .open = query_fq_np_fields_open, + .read = seq_read, + .write = query_fq_np_fields_write, + .release = single_release, +}; + +/******************************************************************************* + * Frame Queue Programmable Fields + ******************************************************************************/ +struct query_fq_fields_data_s { + u32 fqid; +}; + +static struct query_fq_fields_data_s query_fq_fields_data = { + .fqid = 1, +}; + +static int query_fq_fields_show(struct seq_file *file, void *offset) +{ + int ret; + struct qm_fqd fqd; + struct qman_fq fq; + int i = 0; + + memset(&fqd, 0, sizeof(struct qm_fqd)); + fq.fqid = query_fq_fields_data.fqid; + ret = qman_query_fq(&fq, &fqd); + if (ret) + return ret; + seq_printf(file, "Query FQ Programmable Fields Result fqid 0x%x\n", + fq.fqid); + seq_printf(file, " orprws: %u\n", fqd.orprws); + seq_printf(file, " oa: %u\n", fqd.oa); + seq_printf(file, " olws: %u\n", fqd.olws); + + seq_printf(file, " cgid: %u\n", fqd.cgid); + + if ((fqd.fq_ctrl & QM_FQCTRL_MASK) == 0) + seq_printf(file, " fq_ctrl: None\n"); + else { + i = 0; + seq_printf(file, " fq_ctrl:\n"); + while (fq_ctrl_text_list[i].txt != NULL) { + if ((fqd.fq_ctrl & QM_FQCTRL_MASK) & + fq_ctrl_text_list[i].mask) + seq_printf(file, " %s\n", + fq_ctrl_text_list[i].txt); + i++; + } + } + seq_printf(file, " dest_channel: %u\n", fqd.dest.channel); + seq_printf(file, " dest_wq: %u\n", fqd.dest.wq); + seq_printf(file, " ics_cred: %u\n", fqd.ics_cred); + seq_printf(file, " td_mant: %u\n", fqd.td.mant); + seq_printf(file, " td_exp: %u\n", fqd.td.exp); + + seq_printf(file, " ctx_b: 0x%x\n", fqd.context_b); + + seq_printf(file, " ctx_a: 0x%llx\n", qm_fqd_stashing_get64(&fqd)); + /* Any stashing configured */ + if ((fqd.context_a.stashing.exclusive & 0x7) == 0) + seq_printf(file, " ctx_a_stash_exclusive: None\n"); + else { + seq_printf(file, " ctx_a_stash_exclusive:\n"); + i = 0; + while (stashing_text_list[i].txt != NULL) { + if ((fqd.fq_ctrl & 0x7) & stashing_text_list[i].mask) + seq_printf(file, " %s\n", + stashing_text_list[i].txt); + i++; + } + } + seq_printf(file, " ctx_a_stash_annotation_cl: %u\n", + fqd.context_a.stashing.annotation_cl); + seq_printf(file, " ctx_a_stash_data_cl: %u\n", + fqd.context_a.stashing.data_cl); + seq_printf(file, " ctx_a_stash_context_cl: %u\n", + fqd.context_a.stashing.context_cl); + return 0; +} + +static int query_fq_fields_open(struct inode *inode, + struct file *file) +{ + return single_open(file, query_fq_fields_show, NULL); +} + +static 
ssize_t query_fq_fields_write(struct file *f, + const char __user *buf, size_t count, loff_t *off) +{ + int ret; + unsigned long val; + + ret = user_input_convert(buf, count, &val); + if (ret) + return ret; + if (val > MAX_FQID) + return -EINVAL; + query_fq_fields_data.fqid = (u32)val; + return count; +} + +static const struct file_operations query_fq_fields_fops = { + .owner = THIS_MODULE, + .open = query_fq_fields_open, + .read = seq_read, + .write = query_fq_fields_write, + .release = single_release, +}; + +/******************************************************************************* + * Query WQ lengths + ******************************************************************************/ +struct query_wq_lengths_data_s { + union { + u16 channel_wq; /* ignores wq (3 lsbits) */ + struct { + u16 id:13; /* qm_channel */ + u16 __reserved:3; + } __packed channel; + }; +}; +static struct query_wq_lengths_data_s query_wq_lengths_data; +static int query_wq_lengths_show(struct seq_file *file, void *offset) +{ + int ret; + struct qm_mcr_querywq wq; + int i; + + memset(&wq, 0, sizeof(struct qm_mcr_querywq)); + wq.channel.id = query_wq_lengths_data.channel.id; + ret = qman_query_wq(0, &wq); + if (ret) + return ret; + seq_printf(file, "Query Result For Channel: 0x%x\n", wq.channel.id); + for (i = 0; i < 8; i++) + /* mask out upper 4 bits since they are not part of length */ + seq_printf(file, " wq%d_len : %u\n", i, wq.wq_len[i] & 0x0fff); + return 0; +} + +static int query_wq_lengths_open(struct inode *inode, + struct file *file) +{ + return single_open(file, query_wq_lengths_show, NULL); +} + +static ssize_t query_wq_lengths_write(struct file *f, + const char __user *buf, size_t count, loff_t *off) +{ + int ret; + unsigned long val; + + ret = user_input_convert(buf, count, &val); + if (ret) + return ret; + if (val > 0xfff8) + return -EINVAL; + query_wq_lengths_data.channel.id = (u16)val; + return count; +} + +static const struct file_operations query_wq_lengths_fops = { + .owner = THIS_MODULE, + .open = query_wq_lengths_open, + .read = seq_read, + .write = query_wq_lengths_write, + .release = single_release, +}; + +/******************************************************************************* + * Query CGR + ******************************************************************************/ +struct query_cgr_s { + u8 cgid; +}; +static struct query_cgr_s query_cgr_data; + +static int query_cgr_show(struct seq_file *file, void *offset) +{ + int ret; + struct qm_mcr_querycgr cgrd; + struct qman_cgr cgr; + int i, j; + u32 mask; + + memset(&cgr, 0, sizeof(struct qm_mcr_querycgr)); + cgr.cgrid = query_cgr_data.cgid; + ret = qman_query_cgr(&cgr, &cgrd); + if (ret) + return ret; + seq_printf(file, "Query CGR id 0x%x\n", cgr.cgrid); + seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", + cgrd.cgr.wr_parm_g.MA, cgrd.cgr.wr_parm_g.Mn, + cgrd.cgr.wr_parm_g.SA, cgrd.cgr.wr_parm_g.Sn, + cgrd.cgr.wr_parm_g.Pn); + + seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", + cgrd.cgr.wr_parm_y.MA, cgrd.cgr.wr_parm_y.Mn, + cgrd.cgr.wr_parm_y.SA, cgrd.cgr.wr_parm_y.Sn, + cgrd.cgr.wr_parm_y.Pn); + + seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", + cgrd.cgr.wr_parm_r.MA, cgrd.cgr.wr_parm_r.Mn, + cgrd.cgr.wr_parm_r.SA, cgrd.cgr.wr_parm_r.Sn, + cgrd.cgr.wr_parm_r.Pn); + + seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n", + cgrd.cgr.wr_en_g, cgrd.cgr.wr_en_y, cgrd.cgr.wr_en_r); + + seq_printf(file, " cscn_en: %u\n", cgrd.cgr.cscn_en); + if ((qman_ip_rev & 
0xFF00) >= QMAN_REV30) { + seq_printf(file, " cscn_targ_dcp:\n"); + mask = 0x80000000; + for (i = 0; i < 32; i++) { + if (cgrd.cgr.cscn_targ & mask) + seq_printf(file, " send CSCN to dcp %u\n", + (31 - i)); + mask >>= 1; + } + + seq_printf(file, " cscn_targ_swp:\n"); + for (i = 0; i < 4; i++) { + mask = 0x80000000; + for (j = 0; j < 32; j++) { + if (cgrd.cscn_targ_swp[i] & mask) + seq_printf(file, " send CSCN to swp" + " %u\n", (127 - (i * 32) - j)); + mask >>= 1; + } + } + } else { + seq_printf(file, " cscn_targ: %u\n", cgrd.cgr.cscn_targ); + } + seq_printf(file, " cstd_en: %u\n", cgrd.cgr.cstd_en); + seq_printf(file, " cs: %u\n", cgrd.cgr.cs); + + seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n", + cgrd.cgr.cs_thres.TA, cgrd.cgr.cs_thres.Tn); + + if (qman_ip_rev != QMAN_REV10) { + seq_printf(file, " mode: %s\n", + (cgrd.cgr.mode & QMAN_CGR_MODE_FRAME) ? + "frame count" : "byte count"); + } + seq_printf(file, " i_bcnt: %llu\n", qm_mcr_querycgr_i_get64(&cgrd)); + seq_printf(file, " a_bcnt: %llu\n", qm_mcr_querycgr_a_get64(&cgrd)); + + return 0; +} + +static int query_cgr_open(struct inode *inode, struct file *file) +{ + return single_open(file, query_cgr_show, NULL); +} + +static ssize_t query_cgr_write(struct file *f, const char __user *buf, + size_t count, loff_t *off) +{ + int ret; + unsigned long val; + + ret = user_input_convert(buf, count, &val); + if (ret) + return ret; + if (val > 0xff) + return -EINVAL; + query_cgr_data.cgid = (u8)val; + return count; +} + +static const struct file_operations query_cgr_fops = { + .owner = THIS_MODULE, + .open = query_cgr_open, + .read = seq_read, + .write = query_cgr_write, + .release = single_release, +}; + +/******************************************************************************* + * Test Write CGR + ******************************************************************************/ +struct test_write_cgr_s { + u64 i_bcnt; + u8 cgid; +}; +static struct test_write_cgr_s test_write_cgr_data; + +static int testwrite_cgr_show(struct seq_file *file, void *offset) +{ + int ret; + struct qm_mcr_cgrtestwrite result; + struct qman_cgr cgr; + u64 i_bcnt; + + memset(&cgr, 0, sizeof(struct qman_cgr)); + memset(&result, 0, sizeof(struct qm_mcr_cgrtestwrite)); + cgr.cgrid = test_write_cgr_data.cgid; + i_bcnt = test_write_cgr_data.i_bcnt; + ret = qman_testwrite_cgr(&cgr, i_bcnt, &result); + if (ret) + return ret; + seq_printf(file, "CGR Test Write CGR id 0x%x\n", cgr.cgrid); + seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", + result.cgr.wr_parm_g.MA, result.cgr.wr_parm_g.Mn, + result.cgr.wr_parm_g.SA, result.cgr.wr_parm_g.Sn, + result.cgr.wr_parm_g.Pn); + seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", + result.cgr.wr_parm_y.MA, result.cgr.wr_parm_y.Mn, + result.cgr.wr_parm_y.SA, result.cgr.wr_parm_y.Sn, + result.cgr.wr_parm_y.Pn); + seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", + result.cgr.wr_parm_r.MA, result.cgr.wr_parm_r.Mn, + result.cgr.wr_parm_r.SA, result.cgr.wr_parm_r.Sn, + result.cgr.wr_parm_r.Pn); + seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n", + result.cgr.wr_en_g, result.cgr.wr_en_y, result.cgr.wr_en_r); + seq_printf(file, " cscn_en: %u\n", result.cgr.cscn_en); + seq_printf(file, " cscn_targ: %u\n", result.cgr.cscn_targ); + seq_printf(file, " cstd_en: %u\n", result.cgr.cstd_en); + seq_printf(file, " cs: %u\n", result.cgr.cs); + seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n", + result.cgr.cs_thres.TA, result.cgr.cs_thres.Tn); + + /* Add 
Mode for Si 2 */ + if (qman_ip_rev != QMAN_REV10) { + seq_printf(file, " mode: %s\n", + (result.cgr.mode & QMAN_CGR_MODE_FRAME) ? + "frame count" : "byte count"); + } + + seq_printf(file, " i_bcnt: %llu\n", + qm_mcr_cgrtestwrite_i_get64(&result)); + seq_printf(file, " a_bcnt: %llu\n", + qm_mcr_cgrtestwrite_a_get64(&result)); + seq_printf(file, " wr_prob_g: %u\n", result.wr_prob_g); + seq_printf(file, " wr_prob_y: %u\n", result.wr_prob_y); + seq_printf(file, " wr_prob_r: %u\n", result.wr_prob_r); + return 0; +} + +static int testwrite_cgr_open(struct inode *inode, struct file *file) +{ + return single_open(file, testwrite_cgr_show, NULL); +} + +static const struct file_operations testwrite_cgr_fops = { + .owner = THIS_MODULE, + .open = testwrite_cgr_open, + .read = seq_read, + .release = single_release, +}; + + +static int testwrite_cgr_ibcnt_show(struct seq_file *file, void *offset) +{ + seq_printf(file, "i_bcnt: %llu\n", test_write_cgr_data.i_bcnt); + return 0; +} +static int testwrite_cgr_ibcnt_open(struct inode *inode, struct file *file) +{ + return single_open(file, testwrite_cgr_ibcnt_show, NULL); +} + +static ssize_t testwrite_cgr_ibcnt_write(struct file *f, const char __user *buf, + size_t count, loff_t *off) +{ + int ret; + unsigned long val; + + ret = user_input_convert(buf, count, &val); + if (ret) + return ret; + test_write_cgr_data.i_bcnt = val; + return count; +} + +static const struct file_operations teswrite_cgr_ibcnt_fops = { + .owner = THIS_MODULE, + .open = testwrite_cgr_ibcnt_open, + .read = seq_read, + .write = testwrite_cgr_ibcnt_write, + .release = single_release, +}; + +static int testwrite_cgr_cgrid_show(struct seq_file *file, void *offset) +{ + seq_printf(file, "cgrid: %u\n", (u32)test_write_cgr_data.cgid); + return 0; +} +static int testwrite_cgr_cgrid_open(struct inode *inode, struct file *file) +{ + return single_open(file, testwrite_cgr_cgrid_show, NULL); +} + +static ssize_t testwrite_cgr_cgrid_write(struct file *f, const char __user *buf, + size_t count, loff_t *off) +{ + int ret; + unsigned long val; + + ret = user_input_convert(buf, count, &val); + if (ret) + return ret; + if (val > 0xff) + return -EINVAL; + test_write_cgr_data.cgid = (u8)val; + return count; +} + +static const struct file_operations teswrite_cgr_cgrid_fops = { + .owner = THIS_MODULE, + .open = testwrite_cgr_cgrid_open, + .read = seq_read, + .write = testwrite_cgr_cgrid_write, + .release = single_release, +}; + +/******************************************************************************* + * Query Congestion State + ******************************************************************************/ +static int query_congestion_show(struct seq_file *file, void *offset) +{ + int ret; + struct qm_mcr_querycongestion cs; + int i, j, in_cong = 0; + u32 mask; + + memset(&cs, 0, sizeof(struct qm_mcr_querycongestion)); + ret = qman_query_congestion(&cs); + if (ret) + return ret; + seq_printf(file, "Query Congestion Result\n"); + for (i = 0; i < 8; i++) { + mask = 0x80000000; + for (j = 0; j < 32; j++) { + if (cs.state.__state[i] & mask) { + in_cong = 1; + seq_printf(file, " cg %u: %s\n", (i*32)+j, + "in congestion"); + } + mask >>= 1; + } + } + if (!in_cong) + seq_printf(file, " All congestion groups not congested.\n"); + return 0; +} + +static int query_congestion_open(struct inode *inode, struct file *file) +{ + return single_open(file, query_congestion_show, NULL); +} + +static const struct file_operations query_congestion_fops = { + .owner = THIS_MODULE, + .open = query_congestion_open, + .read 
= seq_read, + .release = single_release, +}; + +/******************************************************************************* + * QMan register + ******************************************************************************/ +struct qman_register_s { + u32 val; +}; +static struct qman_register_s qman_register_data; + +static void init_ccsrmempeek(void) +{ + struct device_node *dn; + const u32 *regaddr_p; + + dn = of_find_compatible_node(NULL, NULL, "fsl,qman"); + if (!dn) { + pr_info("No fsl,qman node\n"); + return; + } + regaddr_p = of_get_address(dn, 0, &qman_ccsr_size, NULL); + if (!regaddr_p) { + of_node_put(dn); + return; + } + qman_ccsr_start = of_translate_address(dn, regaddr_p); + of_node_put(dn); +} +/* This function provides access to QMan ccsr memory map */ +static int qman_ccsrmempeek(u32 *val, u32 offset) +{ + void __iomem *addr; + u64 phys_addr; + + if (!qman_ccsr_start) + return -EINVAL; + + if (offset > (qman_ccsr_size - sizeof(u32))) + return -EINVAL; + + phys_addr = qman_ccsr_start + offset; + addr = ioremap(phys_addr, sizeof(u32)); + if (!addr) { + pr_err("ccsrmempeek, ioremap failed\n"); + return -EINVAL; + } + *val = in_be32(addr); + iounmap(addr); + return 0; +} + +static int qman_ccsrmempeek_show(struct seq_file *file, void *offset) +{ + u32 b; + + qman_ccsrmempeek(&b, qman_register_data.val); + seq_printf(file, "QMan register offset = 0x%x\n", + qman_register_data.val); + seq_printf(file, "value = 0x%08x\n", b); + + return 0; +} + +static int qman_ccsrmempeek_open(struct inode *inode, struct file *file) +{ + return single_open(file, qman_ccsrmempeek_show, NULL); +} + +static ssize_t qman_ccsrmempeek_write(struct file *f, const char __user *buf, + size_t count, loff_t *off) +{ + int ret; + unsigned long val; + + ret = user_input_convert(buf, count, &val); + if (ret) + return ret; + /* multiple of 4 */ + if (val > (qman_ccsr_size - sizeof(u32))) { + pr_info("Input 0x%lx > 0x%llx\n", + val, (qman_ccsr_size - sizeof(u32))); + return -EINVAL; + } + if (val & 0x3) { + pr_info("Input 0x%lx not multiple of 4\n", val); + return -EINVAL; + } + qman_register_data.val = val; + return count; +} + +static const struct file_operations qman_ccsrmempeek_fops = { + .owner = THIS_MODULE, + .open = qman_ccsrmempeek_open, + .read = seq_read, + .write = qman_ccsrmempeek_write, +}; + +/******************************************************************************* + * QMan state + ******************************************************************************/ +static int qman_fqd_state_show(struct seq_file *file, void *offset) +{ + struct qm_mcr_queryfq_np np; + struct qman_fq fq; + struct line_buffer_fq line_buf; + int ret, i; + u8 *state = file->private; + u32 qm_fq_state_cnt[fqd_states_count]; + + memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt)); + memset(&line_buf, 0, sizeof(line_buf)); + + seq_printf(file, "List of fq ids in state: %s\n", state_txt[*state]); + + for (i = 1; i < fqid_max; i++) { + fq.fqid = i; + ret = qman_query_fq_np(&fq, &np); + if (ret) + return ret; + if (*state == (np.state & QM_MCR_NP_STATE_MASK)) + add_to_line_buffer(&line_buf, fq.fqid, file); + /* Keep a summary count of all states */ + if ((np.state & QM_MCR_NP_STATE_MASK) < fqd_states_count) + qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++; + } + flush_line_buffer(&line_buf, file); + + for (i = 0; i < fqd_states_count; i++) { + seq_printf(file, "%s count = %u\n", state_txt[i], + qm_fq_state_cnt[i]); + } + return 0; +} + +static int qman_fqd_state_open(struct inode *inode, struct file *file) +{ + 
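/* Descriptive note (editor-added comment, verifiable from the surrounding code): the FQ-state byte placed in inode->i_private when the debugfs file is created is handed to single_open() here, so qman_fqd_state_show() receives it as file->private. */ + 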
return single_open(file, qman_fqd_state_show, inode->i_private); +} + +static const struct file_operations qman_fqd_state_fops = { + .owner = THIS_MODULE, + .open = qman_fqd_state_open, + .read = seq_read, +}; + +static int qman_fqd_ctrl_show(struct seq_file *file, void *offset) +{ + struct qm_fqd fqd; + struct qman_fq fq; + u32 fq_en_cnt = 0, fq_di_cnt = 0; + int ret, i; + struct mask_filter_s *data = file->private; + const char *ctrl_txt = get_fqd_ctrl_text(data->mask); + struct line_buffer_fq line_buf; + + memset(&line_buf, 0, sizeof(line_buf)); + seq_printf(file, "List of fq ids with: %s :%s\n", + ctrl_txt, (data->filter) ? "enabled" : "disabled"); + for (i = 1; i < fqid_max; i++) { + fq.fqid = i; + memset(&fqd, 0, sizeof(struct qm_fqd)); + ret = qman_query_fq(&fq, &fqd); + if (ret) + return ret; + if (data->filter) { + if (fqd.fq_ctrl & data->mask) + add_to_line_buffer(&line_buf, fq.fqid, file); + } else { + if (!(fqd.fq_ctrl & data->mask)) + add_to_line_buffer(&line_buf, fq.fqid, file); + } + if (fqd.fq_ctrl & data->mask) + fq_en_cnt++; + else + fq_di_cnt++; + } + flush_line_buffer(&line_buf, file); + + seq_printf(file, "Total FQD with: %s : enabled = %u\n", + ctrl_txt, fq_en_cnt); + seq_printf(file, "Total FQD with: %s : disabled = %u\n", + ctrl_txt, fq_di_cnt); + return 0; +} + +/******************************************************************************* + * QMan ctrl CGE, TDE, ORP, CTX, CPC, SFDR, BLOCK, HOLD, CACHE + ******************************************************************************/ +static int qman_fqd_ctrl_open(struct inode *inode, struct file *file) +{ + return single_open(file, qman_fqd_ctrl_show, inode->i_private); +} + +static const struct file_operations qman_fqd_ctrl_fops = { + .owner = THIS_MODULE, + .open = qman_fqd_ctrl_open, + .read = seq_read, +}; + +/******************************************************************************* + * QMan ctrl summary + ******************************************************************************/ +/******************************************************************************* + * QMan summary state + ******************************************************************************/ +static int qman_fqd_non_prog_summary_show(struct seq_file *file, void *offset) +{ + struct qm_mcr_queryfq_np np; + struct qman_fq fq; + int ret, i; + u32 qm_fq_state_cnt[fqd_states_count]; + + memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt)); + + for (i = 1; i < fqid_max; i++) { + fq.fqid = i; + ret = qman_query_fq_np(&fq, &np); + if (ret) + return ret; + /* Keep a summary count of all states */ + if ((np.state & QM_MCR_NP_STATE_MASK) < fqd_states_count) + qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++; + } + + for (i = 0; i < fqd_states_count; i++) { + seq_printf(file, "%s count = %u\n", state_txt[i], + qm_fq_state_cnt[i]); + } + return 0; +} + +static int qman_fqd_prog_summary_show(struct seq_file *file, void *offset) +{ + struct qm_fqd fqd; + struct qman_fq fq; + int ret, i , j; + u32 qm_prog_cnt[mask_filter_count/2]; + + memset(qm_prog_cnt, 0, sizeof(qm_prog_cnt)); + + for (i = 1; i < fqid_max; i++) { + memset(&fqd, 0, sizeof(struct qm_fqd)); + fq.fqid = i; + ret = qman_query_fq(&fq, &fqd); + if (ret) + return ret; + /* Keep a summary count of all states */ + for (j = 0; j < mask_filter_count; j += 2) + if ((fqd.fq_ctrl & QM_FQCTRL_MASK) & + mask_filter[j].mask) + qm_prog_cnt[j/2]++; + } + for (i = 0; i < mask_filter_count/2; i++) { + seq_printf(file, "%s count = %u\n", + get_fqd_ctrl_text(mask_filter[i*2].mask), + 
qm_prog_cnt[i]); + } + return 0; +} + +static int qman_fqd_summary_show(struct seq_file *file, void *offset) +{ + int ret; + + /* Display summary of non programmable fields */ + ret = qman_fqd_non_prog_summary_show(file, offset); + if (ret) + return ret; + seq_printf(file, "-----------------------------------------\n"); + /* Display programmable fields */ + ret = qman_fqd_prog_summary_show(file, offset); + if (ret) + return ret; + return 0; +} + +static int qman_fqd_summary_open(struct inode *inode, struct file *file) +{ + return single_open(file, qman_fqd_summary_show, NULL); +} + +static const struct file_operations qman_fqd_summary_fops = { + .owner = THIS_MODULE, + .open = qman_fqd_summary_open, + .read = seq_read, +}; + +/******************************************************************************* + * QMan destination work queue + ******************************************************************************/ +struct qman_dest_wq_s { + u16 wq_id; +}; +static struct qman_dest_wq_s qman_dest_wq_data = { + .wq_id = 0, +}; + +static int qman_fqd_dest_wq_show(struct seq_file *file, void *offset) +{ + struct qm_fqd fqd; + struct qman_fq fq; + int ret, i; + u16 *wq, wq_id = qman_dest_wq_data.wq_id; + struct line_buffer_fq line_buf; + + memset(&line_buf, 0, sizeof(line_buf)); + /* use vmalloc : need to allocate large memory region and don't + * require the memory to be physically contiguous. */ + wq = vmalloc(sizeof(u16) * (0xFFFF+1)); + if (!wq) + return -ENOMEM; + memset(wq, 0, sizeof(u16) * (0xFFFF+1)); + + seq_printf(file, "List of fq ids with destination work queue id" + " = 0x%x\n", wq_id); + + for (i = 1; i < fqid_max; i++) { + fq.fqid = i; + memset(&fqd, 0, sizeof(struct qm_fqd)); + ret = qman_query_fq(&fq, &fqd); + if (ret) { + vfree(wq); + return ret; + } + if (wq_id == fqd.dest_wq) + add_to_line_buffer(&line_buf, fq.fqid, file); + wq[fqd.dest_wq]++; + } + flush_line_buffer(&line_buf, file); + + seq_printf(file, "Summary of all FQD destination work queue values\n"); + for (i = 0; i < 0xFFFF; i++) { + if (wq[i]) + seq_printf(file, "Channel: 0x%x WQ: 0x%x WQ_ID: 0x%x, " + "count = %u\n", i >> 3, i & 0x3, i, wq[i]); + } + vfree(wq); + return 0; +} + +static ssize_t qman_fqd_dest_wq_write(struct file *f, const char __user *buf, + size_t count, loff_t *off) +{ + int ret; + unsigned long val; + + ret = user_input_convert(buf, count, &val); + if (ret) + return ret; + if (val > 0xFFFF) + return -EINVAL; + qman_dest_wq_data.wq_id = val; + return count; +} + +static int qman_fqd_dest_wq_open(struct inode *inode, struct file *file) +{ + return single_open(file, qman_fqd_dest_wq_show, NULL); +} + +static const struct file_operations qman_fqd_dest_wq_fops = { + .owner = THIS_MODULE, + .open = qman_fqd_dest_wq_open, + .read = seq_read, + .write = qman_fqd_dest_wq_write, +}; + +/******************************************************************************* + * QMan Intra-Class Scheduling Credit + ******************************************************************************/ +static int qman_fqd_cred_show(struct seq_file *file, void *offset) +{ + struct qm_fqd fqd; + struct qman_fq fq; + int ret, i; + u32 fq_cnt = 0; + struct line_buffer_fq line_buf; + + memset(&line_buf, 0, sizeof(line_buf)); + seq_printf(file, "List of fq ids with Intra-Class Scheduling Credit > 0" + "\n"); + + for (i = 1; i < fqid_max; i++) { + fq.fqid = i; + memset(&fqd, 0, sizeof(struct qm_fqd)); + ret = qman_query_fq(&fq, &fqd); + if (ret) + return ret; + if (fqd.ics_cred > 0) { + add_to_line_buffer(&line_buf, fq.fqid, 
file); + fq_cnt++; + } + } + flush_line_buffer(&line_buf, file); + + seq_printf(file, "Total FQD with ics_cred > 0 = %d\n", fq_cnt); + return 0; +} + +static int qman_fqd_cred_open(struct inode *inode, struct file *file) +{ + return single_open(file, qman_fqd_cred_show, NULL); +} + +static const struct file_operations qman_fqd_cred_fops = { + .owner = THIS_MODULE, + .open = qman_fqd_cred_open, + .read = seq_read, +}; + +/******************************************************************************* + * Class Queue Fields + ******************************************************************************/ +struct query_cq_fields_data_s { + u32 cqid; +}; + +static struct query_cq_fields_data_s query_cq_fields_data = { + .cqid = 1, +}; + +static int query_cq_fields_show(struct seq_file *file, void *offset) +{ + int ret; + struct qm_mcr_ceetm_cq_query query_result; + unsigned int cqid; + + cqid = query_cq_fields_data.cqid; + ret = qman_ceetm_query_cq(cqid, 0, &query_result); + if (ret) + return ret; + seq_printf(file, "Query CQ Fields Result cqid 0x%x\n", cqid); + seq_printf(file, " ccgid: %u\n", query_result.ccgid); + seq_printf(file, " state: %u\n", query_result.state); + seq_printf(file, " pfdr_hptr: %u\n", query_result.pfdr_hptr); + seq_printf(file, " pfdr_tptr: %u\n", query_result.pfdr_tptr); + seq_printf(file, " od1_xsfdr: %u\n", query_result.od1_xsfdr); + seq_printf(file, " od2_xsfdr: %u\n", query_result.od2_xsfdr); + seq_printf(file, " od3_xsfdr: %u\n", query_result.od3_xsfdr); + seq_printf(file, " od4_xsfdr: %u\n", query_result.od4_xsfdr); + seq_printf(file, " od5_xsfdr: %u\n", query_result.od5_xsfdr); + seq_printf(file, " od6_xsfdr: %u\n", query_result.od6_xsfdr); + seq_printf(file, " ra1_xsfdr: %u\n", query_result.ra1_xsfdr); + seq_printf(file, " ra2_xsfdr: %u\n", query_result.ra2_xsfdr); + seq_printf(file, " frame_count: %u\n", query_result.frm_cnt); + + return 0; +} + +static int query_cq_fields_open(struct inode *inode, + struct file *file) +{ + return single_open(file, query_cq_fields_show, NULL); +} + +static ssize_t query_cq_fields_write(struct file *f, + const char __user *buf, size_t count, loff_t *off) +{ + int ret; + unsigned long val; + + ret = user_input_convert(buf, count, &val); + if (ret) + return ret; + if (val > MAX_FQID) + return -EINVAL; + query_cq_fields_data.cqid = (u32)val; + return count; +} + +static const struct file_operations query_cq_fields_fops = { + .owner = THIS_MODULE, + .open = query_cq_fields_open, + .read = seq_read, + .write = query_cq_fields_write, + .release = single_release, +}; +/* helper macros used in qman_debugfs_module_init */ +#define QMAN_DBGFS_ENTRY(name, mode, parent, data, fops) \ + do { \ + d = debugfs_create_file(name, \ + mode, parent, \ + data, \ + fops); \ + if (d == NULL) { \ + ret = -ENOMEM; \ + goto _return; \ + } \ + } while (0) + +/* dfs_root as parent */ +#define QMAN_DBGFS_ENTRY_ROOT(name, mode, data, fops) \ + QMAN_DBGFS_ENTRY(name, mode, dfs_root, data, fops) + +/* fqd_root as parent */ +#define QMAN_DBGFS_ENTRY_FQDROOT(name, mode, data, fops) \ + QMAN_DBGFS_ENTRY(name, mode, fqd_root, data, fops) + +/* fqd state */ +#define QMAN_DBGFS_ENTRY_FQDSTATE(name, index) \ + QMAN_DBGFS_ENTRY_FQDROOT(name, S_IRUGO, \ + (void *)&mask_filter[index], &qman_fqd_ctrl_fops) + +static int __init qman_debugfs_module_init(void) +{ + int ret = 0; + struct dentry *d, *fqd_root; + u32 reg; + + fqid_max = 0; + init_ccsrmempeek(); + if (qman_ccsr_start) { + if (!qman_ccsrmempeek(®, QM_FQD_AR)) { + /* extract the size of the FQD window */ + reg = 
reg & 0x3f; + /* calculate valid frame queue descriptor range */ + fqid_max = (1 << (reg + 1)) / QM_FQD_BLOCK_SIZE; + } + } + dfs_root = debugfs_create_dir("qman", NULL); + fqd_root = debugfs_create_dir("fqd", dfs_root); + if (dfs_root == NULL || fqd_root == NULL) { + ret = -ENOMEM; + pr_err("Cannot create qman/fqd debugfs dir\n"); + goto _return; + } + if (fqid_max) { + QMAN_DBGFS_ENTRY_ROOT("ccsrmempeek", S_IRUGO | S_IWUGO, + NULL, &qman_ccsrmempeek_fops); + } + QMAN_DBGFS_ENTRY_ROOT("query_fq_np_fields", S_IRUGO | S_IWUGO, + &query_fq_np_fields_data, &query_fq_np_fields_fops); + + QMAN_DBGFS_ENTRY_ROOT("query_fq_fields", S_IRUGO | S_IWUGO, + &query_fq_fields_data, &query_fq_fields_fops); + + QMAN_DBGFS_ENTRY_ROOT("query_wq_lengths", S_IRUGO | S_IWUGO, + &query_wq_lengths_data, &query_wq_lengths_fops); + + QMAN_DBGFS_ENTRY_ROOT("query_cgr", S_IRUGO | S_IWUGO, + &query_cgr_data, &query_cgr_fops); + + QMAN_DBGFS_ENTRY_ROOT("query_congestion", S_IRUGO, + NULL, &query_congestion_fops); + + QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr", S_IRUGO, + NULL, &testwrite_cgr_fops); + + QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_cgrid", S_IRUGO | S_IWUGO, + NULL, &teswrite_cgr_cgrid_fops); + + QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_ibcnt", S_IRUGO | S_IWUGO, + NULL, &teswrite_cgr_ibcnt_fops); + + /* Create files with fqd_root as parent */ + + QMAN_DBGFS_ENTRY_FQDROOT("stateoos", S_IRUGO, + (void *)&fqd_states[QM_MCR_NP_STATE_OOS], &qman_fqd_state_fops); + + QMAN_DBGFS_ENTRY_FQDROOT("state_retired", S_IRUGO, + (void *)&fqd_states[QM_MCR_NP_STATE_RETIRED], + &qman_fqd_state_fops); + + QMAN_DBGFS_ENTRY_FQDROOT("state_tentatively_sched", S_IRUGO, + (void *)&fqd_states[QM_MCR_NP_STATE_TEN_SCHED], + &qman_fqd_state_fops); + + QMAN_DBGFS_ENTRY_FQDROOT("state_truly_sched", S_IRUGO, + (void *)&fqd_states[QM_MCR_NP_STATE_TRU_SCHED], + &qman_fqd_state_fops); + + QMAN_DBGFS_ENTRY_FQDROOT("state_parked", S_IRUGO, + (void *)&fqd_states[QM_MCR_NP_STATE_PARKED], + &qman_fqd_state_fops); + + QMAN_DBGFS_ENTRY_FQDROOT("state_active", S_IRUGO, + (void *)&fqd_states[QM_MCR_NP_STATE_ACTIVE], + &qman_fqd_state_fops); + QMAN_DBGFS_ENTRY_ROOT("query_cq_fields", S_IRUGO | S_IWUGO, + &query_cq_fields_data, &query_cq_fields_fops); + + + QMAN_DBGFS_ENTRY_FQDSTATE("cge_enable", 17); + + QMAN_DBGFS_ENTRY_FQDSTATE("cge_disable", 16); + + QMAN_DBGFS_ENTRY_FQDSTATE("tde_enable", 15); + + QMAN_DBGFS_ENTRY_FQDSTATE("tde_disable", 14); + + QMAN_DBGFS_ENTRY_FQDSTATE("orp_enable", 13); + + QMAN_DBGFS_ENTRY_FQDSTATE("orp_disable", 12); + + QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_enable", 11); + + QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_disable", 10); + + QMAN_DBGFS_ENTRY_FQDSTATE("cpc_enable", 9); + + QMAN_DBGFS_ENTRY_FQDSTATE("cpc_disable", 8); + + QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_enable", 7); + + QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_disable", 6); + + QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_enable", 5); + + QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_disable", 4); + + QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_enable", 3); + + QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_disable", 2); + + QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_enable", 1); + + QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_disable", 0); + + QMAN_DBGFS_ENTRY_FQDROOT("summary", S_IRUGO, + NULL, &qman_fqd_summary_fops); + + QMAN_DBGFS_ENTRY_FQDROOT("wq", S_IRUGO | S_IWUGO, + NULL, &qman_fqd_dest_wq_fops); + + QMAN_DBGFS_ENTRY_FQDROOT("cred", S_IRUGO, + NULL, &qman_fqd_cred_fops); + + return 0; + +_return: + if (dfs_root) + debugfs_remove_recursive(dfs_root); + return ret; +} + +static void __exit 
qman_debugfs_module_exit(void) +{ + debugfs_remove_recursive(dfs_root); +} + +module_init(qman_debugfs_module_init); +module_exit(qman_debugfs_module_exit); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/staging/fsl_qbman/qman_driver.c b/drivers/staging/fsl_qbman/qman_driver.c new file mode 100644 index 0000000..07d079d --- /dev/null +++ b/drivers/staging/fsl_qbman/qman_driver.c @@ -0,0 +1,656 @@ +/* Copyright 2008-2012 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_private.h" +#ifdef CONFIG_FSL_PAMU +#include +#endif + +/* Global variable containing revision id (even on non-control plane systems + * where CCSR isn't available) */ +u16 qman_ip_rev; +EXPORT_SYMBOL(qman_ip_rev); +u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1; +EXPORT_SYMBOL(qm_channel_pool1); +u16 qm_channel_caam = QMAN_CHANNEL_CAAM; +EXPORT_SYMBOL(qm_channel_caam); +u16 qm_channel_pme = QMAN_CHANNEL_PME; +EXPORT_SYMBOL(qm_channel_pme); +u16 qman_portal_max; + +u32 qman_clk; +struct qm_ceetm qman_ceetms[QMAN_CEETM_MAX]; + +/* size of the fqd region in bytes */ +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP +static u32 fqd_size = (PAGE_SIZE << CONFIG_FSL_QMAN_FQD_SZ); +#endif + +/* For these variables, and the portal-initialisation logic, the + * comments in bman_driver.c apply here so won't be repeated. 
*/ +static struct qman_portal *shared_portals[NR_CPUS]; +static int num_shared_portals; +static int shared_portals_idx; + +/* A SDQCR mask comprising all the available/visible pool channels */ +static u32 pools_sdqcr; + +#define STR_ERR_NOPROP "No '%s' property in node %s\n" +#define STR_ERR_CELL "'%s' is not a %d-cell range in node %s\n" +#define STR_FQID_RANGE "fsl,fqid-range" +#define STR_POOL_CHAN_RANGE "fsl,pool-channel-range" +#define STR_CGRID_RANGE "fsl,cgrid-range" + +/* A "fsl,fqid-range" node; release the given range to the allocator */ +static __init int fsl_fqid_range_init(struct device_node *node) +{ + int ret; + const u32 *range = of_get_property(node, STR_FQID_RANGE, &ret); + if (!range) { + pr_err(STR_ERR_NOPROP, STR_FQID_RANGE, node->full_name); + return -EINVAL; + } + if (ret != 8) { + pr_err(STR_ERR_CELL, STR_FQID_RANGE, 2, node->full_name); + return -EINVAL; + } + qman_release_fqid_range(range[0], range[1]); + pr_info("Qman: FQID allocator includes range %d:%d\n", + range[0], range[1]); + return 0; +} + +/* A "fsl,pool-channel-range" node; add to the SDQCR mask only */ +static __init int fsl_pool_channel_range_sdqcr(struct device_node *node) +{ + int ret; + const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret); + if (!chanid) { + pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name); + return -EINVAL; + } + if (ret != 8) { + pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name); + return -EINVAL; + } + for (ret = 0; ret < chanid[1]; ret++) + pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(chanid[0] + ret); + return 0; +} + +/* A "fsl,pool-channel-range" node; release the given range to the allocator */ +static __init int fsl_pool_channel_range_init(struct device_node *node) +{ + int ret; + const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret); + if (!chanid) { + pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name); + return -EINVAL; + } + if (ret != 8) { + pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name); + return -EINVAL; + } + qman_release_pool_range(chanid[0], chanid[1]); + pr_info("Qman: pool channel allocator includes range %d:%d\n", + chanid[0], chanid[1]); + return 0; +} + +/* A "fsl,cgrid-range" node; release the given range to the allocator */ +static __init int fsl_cgrid_range_init(struct device_node *node) +{ + struct qman_cgr cgr; + int ret, errors = 0; + const u32 *range = of_get_property(node, STR_CGRID_RANGE, &ret); + if (!range) { + pr_err(STR_ERR_NOPROP, STR_CGRID_RANGE, node->full_name); + return -EINVAL; + } + if (ret != 8) { + pr_err(STR_ERR_CELL, STR_CGRID_RANGE, 2, node->full_name); + return -EINVAL; + } + qman_release_cgrid_range(range[0], range[1]); + pr_info("Qman: CGRID allocator includes range %d:%d\n", + range[0], range[1]); + for (cgr.cgrid = 0; cgr.cgrid < __CGR_NUM; cgr.cgrid++) { + ret = qman_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL); + if (ret) + errors++; + } + if (errors) + pr_err("Warning: %d error%s while initialising CGRs %d:%d\n", + errors, (errors > 1) ? 
"s" : "", range[0], range[1]); + return 0; +} + +static __init int fsl_ceetm_init(struct device_node *node) +{ + enum qm_dc_portal dcp_portal; + struct qm_ceetm_sp *sp; + struct qm_ceetm_lni *lni; + int ret, i; + const u32 *range; + + /* Find LFQID range */ + range = of_get_property(node, "fsl,ceetm-lfqid-range", &ret); + if (!range) { + pr_err("No fsl,ceetm-lfqid-range in node %s\n", + node->full_name); + return -EINVAL; + } + if (ret != 8) { + pr_err("fsl,ceetm-lfqid-range is not a 2-cell range in node" + " %s\n", node->full_name); + return -EINVAL; + } + + dcp_portal = (range[0] & 0x0F0000) >> 16; + if (dcp_portal > qm_dc_portal_fman1) { + pr_err("The DCP portal %d doesn't support CEETM\n", dcp_portal); + return -EINVAL; + } + + if (dcp_portal == qm_dc_portal_fman0) + qman_release_ceetm0_lfqid_range(range[0], range[1]); + if (dcp_portal == qm_dc_portal_fman1) + qman_release_ceetm1_lfqid_range(range[0], range[1]); + pr_debug("Qman: The lfqid allocator of CEETM %d includes range" + " 0x%x:0x%x\n", dcp_portal, range[0], range[1]); + + qman_ceetms[dcp_portal].idx = dcp_portal; + INIT_LIST_HEAD(&qman_ceetms[dcp_portal].sub_portals); + INIT_LIST_HEAD(&qman_ceetms[dcp_portal].lnis); + + /* Find Sub-portal range */ + range = of_get_property(node, "fsl,ceetm-sp-range", &ret); + if (!range) { + pr_err("No fsl,ceetm-sp-range in node %s\n", node->full_name); + return -EINVAL; + } + if (ret != 8) { + pr_err("fsl,ceetm-sp-range is not a 2-cell range in node %s\n", + node->full_name); + return -EINVAL; + } + + for (i = 0; i < range[1]; i++) { + sp = kzalloc(sizeof(*sp), GFP_KERNEL); + if (!sp) { + pr_err("Can't alloc memory for sub-portal %d\n", + range[0] + i); + return -ENOMEM; + } + sp->idx = range[0] + i; + sp->dcp_idx = dcp_portal; + sp->is_claimed = 0; + list_add_tail(&sp->node, &qman_ceetms[dcp_portal].sub_portals); + sp++; + } + pr_debug("Qman: Reserve sub-portal %d:%d for CEETM %d\n", + range[0], range[1], dcp_portal); + qman_ceetms[dcp_portal].sp_range[0] = range[0]; + qman_ceetms[dcp_portal].sp_range[1] = range[1]; + + /* Find LNI range */ + range = of_get_property(node, "fsl,ceetm-lni-range", &ret); + if (!range) { + pr_err("No fsl,ceetm-lni-range in node %s\n", node->full_name); + return -EINVAL; + } + if (ret != 8) { + pr_err("fsl,ceetm-lni-range is not a 2-cell range in node %s\n", + node->full_name); + return -EINVAL; + } + + for (i = 0; i < range[1]; i++) { + lni = kzalloc(sizeof(*lni), GFP_KERNEL); + if (!lni) { + pr_err("Can't alloc memory for LNI %d\n", + range[0] + i); + return -ENOMEM; + } + lni->idx = range[0] + i; + lni->dcp_idx = dcp_portal; + lni->is_claimed = 0; + INIT_LIST_HEAD(&lni->channels); + list_add_tail(&lni->node, &qman_ceetms[dcp_portal].lnis); + lni++; + } + pr_debug("Qman: Reserve LNI %d:%d for CEETM %d\n", + range[0], range[1], dcp_portal); + qman_ceetms[dcp_portal].lni_range[0] = range[0]; + qman_ceetms[dcp_portal].lni_range[1] = range[1]; + + /* Find CEETM channel range */ + range = of_get_property(node, "fsl,ceetm-channel-range", &ret); + if (!range) { + pr_err("No fsl,ceetm-channel-range in node %s\n", + node->full_name); + return -EINVAL; + } + if (ret != 8) { + pr_err("fsl,ceetm-channel-range is not a 2-cell range in node" + "%s\n", node->full_name); + return -EINVAL; + } + + if (dcp_portal == qm_dc_portal_fman0) + qman_release_ceetm0_channel_range(range[0], range[1]); + if (dcp_portal == qm_dc_portal_fman1) + qman_release_ceetm1_channel_range(range[0], range[1]); + pr_debug("Qman: The channel allocator of CEETM %d includes" + " range %d:%d\n", dcp_portal, 
range[0], range[1]); + + /* Set CEETM PRES register */ + ret = qman_ceetm_set_prescaler(dcp_portal); + if (ret) + return ret; + return 0; +} + +void qman_get_ip_revision(struct device_node *dn) +{ + u16 ip_rev = 0; + for_each_compatible_node(dn, NULL, "fsl,qman-portal") { + if (!of_device_is_available(dn)) + continue; + if (of_device_is_compatible(dn, "fsl,qman-portal-1.0") || + of_device_is_compatible(dn, "fsl,qman-portal-1.0.0")) { + ip_rev = QMAN_REV10; + qman_portal_max = 10; + } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.1") || + of_device_is_compatible(dn, "fsl,qman-portal-1.1.0")) { + ip_rev = QMAN_REV11; + qman_portal_max = 10; + } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.2") || + of_device_is_compatible(dn, "fsl,qman-portal-1.2.0")) { + ip_rev = QMAN_REV12; + qman_portal_max = 10; + } else if (of_device_is_compatible(dn, "fsl,qman-portal-2.0") || + of_device_is_compatible(dn, "fsl,qman-portal-2.0.0")) { + ip_rev = QMAN_REV20; + qman_portal_max = 3; + } else if (of_device_is_compatible(dn, + "fsl,qman-portal-3.0.0")) { + ip_rev = QMAN_REV30; + qman_portal_max = 50; + } else if (of_device_is_compatible(dn, + "fsl,qman-portal-3.0.1")) { + ip_rev = QMAN_REV30; + qman_portal_max = 25; + } else if (of_device_is_compatible(dn, + "fsl,qman-portal-3.0.2")) { + ip_rev = QMAN_REV30; + qman_portal_max = 10; + } else if (of_device_is_compatible(dn, + "fsl,qman-portal-3.0.3")) { + ip_rev = QMAN_REV30; + qman_portal_max = 18; + } + + if (!qman_ip_rev) { + if (ip_rev) { + qman_ip_rev = ip_rev; + } else { + pr_warning("unknown Qman version," + " default to rev1.1\n"); + qman_ip_rev = QMAN_REV11; + } + } else if (ip_rev && (qman_ip_rev != ip_rev)) + pr_warning("Revision=0x%04x, but portal '%s' has" + " 0x%04x\n", + qman_ip_rev, dn->full_name, ip_rev); + if (qman_ip_rev == ip_rev) + break; + } +} + +/* Parse a portal node, perform generic mapping duties and return the config. It + * is not known at this stage for what purpose (or even if) the portal will be + * used. 
*/ +static struct qm_portal_config * __init parse_pcfg(struct device_node *node) +{ + struct qm_portal_config *pcfg; + const u32 *index, *channel; + int irq, ret; + + pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL); + if (!pcfg) { + pr_err("can't allocate portal config"); + return NULL; + } + + ret = of_address_to_resource(node, DPA_PORTAL_CE, + &pcfg->addr_phys[DPA_PORTAL_CE]); + if (ret) { + pr_err("Can't get %s property '%s'\n", node->full_name, + "reg::CE"); + goto err; + } + ret = of_address_to_resource(node, DPA_PORTAL_CI, + &pcfg->addr_phys[DPA_PORTAL_CI]); + if (ret) { + pr_err("Can't get %s property '%s'\n", node->full_name, + "reg::CI"); + goto err; + } + index = of_get_property(node, "cell-index", &ret); + if (!index || (ret != 4)) { + pr_err("Can't get %s property '%s'\n", node->full_name, + "cell-index"); + goto err; + } + if (*index >= qman_portal_max) + goto err; + + channel = of_get_property(node, "fsl,qman-channel-id", &ret); + if (!channel || (ret != 4)) { + pr_err("Can't get %s property '%s'\n", node->full_name, + "fsl,qman-channel-id"); + goto err; + } + if (*channel != (*index + QM_CHANNEL_SWPORTAL0)) + pr_err("Warning: node %s has mismatched %s and %s\n", + node->full_name, "cell-index", "fsl,qman-channel-id"); + pcfg->public_cfg.channel = *channel; + pcfg->public_cfg.cpu = -1; + irq = irq_of_parse_and_map(node, 0); + if (irq == NO_IRQ) { + pr_err("Can't get %s property '%s'\n", node->full_name, + "interrupts"); + goto err; + } + pcfg->public_cfg.irq = irq; + pcfg->public_cfg.index = *index; + pcfg->node = node; +#ifdef CONFIG_FSL_QMAN_CONFIG + /* We need the same LIODN offset for all portals */ + qman_liodn_fixup(pcfg->public_cfg.channel); +#endif + + pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot( + pcfg->addr_phys[DPA_PORTAL_CE].start, + resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]), + 0); + pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot( + pcfg->addr_phys[DPA_PORTAL_CI].start, + resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]), + _PAGE_GUARDED | _PAGE_NO_CACHE); + + return pcfg; +err: + kfree(pcfg); + return NULL; +} + +static struct qm_portal_config *get_pcfg(struct list_head *list) +{ + struct qm_portal_config *pcfg; + if (list_empty(list)) + return NULL; + pcfg = list_entry(list->prev, struct qm_portal_config, list); + list_del(&pcfg->list); + return pcfg; +} + +#ifdef CONFIG_FSL_PAMU +static void portal_set_liodns(const struct qm_portal_config *pcfg, int cpu) +{ + unsigned int index = 0; + unsigned int liodn_cnt = pamu_get_liodn_count(pcfg->node); + while (index < liodn_cnt) { + int ret = pamu_set_stash_dest(pcfg->node, index++, cpu, 1); + if (ret < 0) + pr_warning("Failed to set QMan stashing LIODN\n"); + } +} +#else +#define portal_set_liodns(pcfg, cpu) do { } while (0) +#endif + +static void portal_set_cpu(const struct qm_portal_config *pcfg, int cpu) +{ + portal_set_liodns(pcfg, cpu); +#ifdef CONFIG_FSL_QMAN_CONFIG + if (qman_set_sdest(pcfg->public_cfg.channel, cpu)) +#endif + pr_warning("Failed to set QMan portal's stash request queue\n"); +} + +static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg) +{ + struct qman_portal *p; + struct cpumask oldmask = *tsk_cpus_allowed(current); + + portal_set_cpu(pcfg, pcfg->public_cfg.cpu); + set_cpus_allowed_ptr(current, get_cpu_mask(pcfg->public_cfg.cpu)); + p = qman_create_affine_portal(pcfg, NULL); + if (p) { + u32 irq_sources = 0; + /* Determine what should be interrupt-vs-poll driven */ +#ifdef CONFIG_FSL_DPA_PIRQ_SLOW + irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI | + QM_PIRQ_CSCI; +#endif 
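+	/* Editor-added comment: per the Kconfig names, CONFIG_FSL_DPA_PIRQ_SLOW guards the slow-path sources above, while the block below adds DQRI (dequeue ring) as the fast-path interrupt source. */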
+#ifdef CONFIG_FSL_DPA_PIRQ_FAST + irq_sources |= QM_PIRQ_DQRI; +#endif + qman_irqsource_add(irq_sources); + pr_info("Qman portal %sinitialised, cpu %d\n", + pcfg->public_cfg.is_shared ? "(shared) " : "", + pcfg->public_cfg.cpu); + } else + pr_crit("Qman portal failure on cpu %d\n", + pcfg->public_cfg.cpu); + set_cpus_allowed_ptr(current, &oldmask); + return p; +} + +static void init_slave(int cpu) +{ + struct qman_portal *p; + struct cpumask oldmask = *tsk_cpus_allowed(current); + set_cpus_allowed_ptr(current, get_cpu_mask(cpu)); + p = qman_create_affine_slave(shared_portals[shared_portals_idx++]); + if (!p) + pr_err("Qman slave portal failure on cpu %d\n", cpu); + else + pr_info("Qman portal %sinitialised, cpu %d\n", "(slave) ", cpu); + set_cpus_allowed_ptr(current, &oldmask); + if (shared_portals_idx >= num_shared_portals) + shared_portals_idx = 0; +} + +static struct cpumask want_unshared __initdata; +static struct cpumask want_shared __initdata; + +static int __init parse_qportals(char *str) +{ + return parse_portals_bootarg(str, &want_shared, &want_unshared, + "qportals"); +} +__setup("qportals=", parse_qportals); + +static __init int qman_init(void) +{ + struct cpumask slave_cpus; + struct cpumask unshared_cpus = *cpu_none_mask; + struct cpumask shared_cpus = *cpu_none_mask; + LIST_HEAD(unused_pcfgs); + LIST_HEAD(unshared_pcfgs); + LIST_HEAD(shared_pcfgs); + struct device_node *dn; + struct qm_portal_config *pcfg; + struct qman_portal *p; + int cpu, ret; + const u32 *clk; + + /* Initialise the Qman (CCSR) device */ + for_each_compatible_node(dn, NULL, "fsl,qman") { + if (!qman_init_ccsr(dn)) + pr_info("Qman err interrupt handler present\n"); + else + pr_err("Qman CCSR setup failed\n"); + + clk = of_get_property(dn, "clock-frequency", NULL); + if (!clk) + pr_warning("Can't find Qman clock frequency\n"); + else + qman_clk = *clk; + } +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + /* Setup lookup table for FQ demux */ + ret = qman_setup_fq_lookup_table(fqd_size/64); + if (ret) + return ret; +#endif + + /* Get qman ip revision */ + qman_get_ip_revision(dn); + if ((qman_ip_rev & 0xff00) >= QMAN_REV30) { + qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3; + qm_channel_caam = QMAN_CHANNEL_CAAM_REV3; + qm_channel_pme = QMAN_CHANNEL_PME_REV3; + } + + /* Parse pool channels into the SDQCR mask. (Must happen before portals + * are initialised.) */ + for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") { + ret = fsl_pool_channel_range_sdqcr(dn); + if (ret) + return ret; + } + /* Initialise portals. 
See bman_driver.c for comments */ + for_each_compatible_node(dn, NULL, "fsl,qman-portal") { + if (!of_device_is_available(dn)) + continue; + pcfg = parse_pcfg(dn); + if (pcfg) { + pcfg->public_cfg.pools = pools_sdqcr; + list_add_tail(&pcfg->list, &unused_pcfgs); + } + } + for_each_cpu(cpu, &want_shared) { + pcfg = get_pcfg(&unused_pcfgs); + if (!pcfg) + break; + pcfg->public_cfg.cpu = cpu; + list_add_tail(&pcfg->list, &shared_pcfgs); + cpumask_set_cpu(cpu, &shared_cpus); + } + for_each_cpu(cpu, &want_unshared) { + if (cpumask_test_cpu(cpu, &shared_cpus)) + continue; + pcfg = get_pcfg(&unused_pcfgs); + if (!pcfg) + break; + pcfg->public_cfg.cpu = cpu; + list_add_tail(&pcfg->list, &unshared_pcfgs); + cpumask_set_cpu(cpu, &unshared_cpus); + } + if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) { + for_each_online_cpu(cpu) { + pcfg = get_pcfg(&unused_pcfgs); + if (!pcfg) + break; + pcfg->public_cfg.cpu = cpu; + list_add_tail(&pcfg->list, &unshared_pcfgs); + cpumask_set_cpu(cpu, &unshared_cpus); + } + } + cpumask_andnot(&slave_cpus, cpu_online_mask, &shared_cpus); + cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus); + if (cpumask_empty(&slave_cpus)) { + if (!list_empty(&shared_pcfgs)) { + cpumask_or(&unshared_cpus, &unshared_cpus, + &shared_cpus); + cpumask_clear(&shared_cpus); + list_splice_tail(&shared_pcfgs, &unshared_pcfgs); + INIT_LIST_HEAD(&shared_pcfgs); + } + } else { + if (list_empty(&shared_pcfgs)) { + pcfg = get_pcfg(&unshared_pcfgs); + if (!pcfg) { + pr_crit("No QMan portals available!\n"); + return 0; + } + cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus); + cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus); + list_add_tail(&pcfg->list, &shared_pcfgs); + } + } + list_for_each_entry(pcfg, &unshared_pcfgs, list) { + pcfg->public_cfg.is_shared = 0; + p = init_pcfg(pcfg); + } + list_for_each_entry(pcfg, &shared_pcfgs, list) { + pcfg->public_cfg.is_shared = 1; + p = init_pcfg(pcfg); + if (p) + shared_portals[num_shared_portals++] = p; + } + if (!cpumask_empty(&slave_cpus)) + for_each_cpu(cpu, &slave_cpus) + init_slave(cpu); + pr_info("Qman portals initialised\n"); + /* Initialise FQID allocation ranges */ + for_each_compatible_node(dn, NULL, "fsl,fqid-range") { + ret = fsl_fqid_range_init(dn); + if (ret) + return ret; + } + /* Initialise CGRID allocation ranges */ + for_each_compatible_node(dn, NULL, "fsl,cgrid-range") { + ret = fsl_cgrid_range_init(dn); + if (ret) + return ret; + } + /* Parse pool channels into the allocator. (Must happen after portals + * are initialised.) */ + for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") { + ret = fsl_pool_channel_range_init(dn); + if (ret) + return ret; + } + + /* Parse CEETM */ + for_each_compatible_node(dn, NULL, "fsl,qman-ceetm") { + ret = fsl_ceetm_init(dn); + if (ret) + return ret; + } + return 0; +} +subsys_initcall(qman_init); diff --git a/drivers/staging/fsl_qbman/qman_high.c b/drivers/staging/fsl_qbman/qman_high.c new file mode 100644 index 0000000..04290a2 --- /dev/null +++ b/drivers/staging/fsl_qbman/qman_high.c @@ -0,0 +1,4329 @@ +/* Copyright 2008-2012 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_low.h" + +/* Compilation constants */ +#define DQRR_MAXFILL 15 +#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */ +#define IRQNAME "QMan portal %d" +#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */ + +/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about + * inter-processor locking only. Note, FQLOCK() is always called either under a + * local_irq_save() or from interrupt context - hence there's no need for irq + * protection (and indeed, attempting to nest irq-protection doesn't work, as + * the "irq en/disable" machinery isn't recursive...). */ +#define FQLOCK(fq) \ + do { \ + struct qman_fq *__fq478 = (fq); \ + if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \ + spin_lock(&__fq478->fqlock); \ + } while(0) +#define FQUNLOCK(fq) \ + do { \ + struct qman_fq *__fq478 = (fq); \ + if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \ + spin_unlock(&__fq478->fqlock); \ + } while(0) + +static inline void fq_set(struct qman_fq *fq, u32 mask) +{ + set_bits(mask, &fq->flags); +} +static inline void fq_clear(struct qman_fq *fq, u32 mask) +{ + clear_bits(mask, &fq->flags); +} +static inline int fq_isset(struct qman_fq *fq, u32 mask) +{ + return fq->flags & mask; +} +static inline int fq_isclear(struct qman_fq *fq, u32 mask) +{ + return !(fq->flags & mask); +} + +struct qman_portal { + struct qm_portal p; + unsigned long bits; /* PORTAL_BITS_*** - dynamic, strictly internal */ + unsigned long irq_sources; + u32 slowpoll; /* only used when interrupts are off */ + struct qman_fq *vdqcr_owned; /* only 1 volatile dequeue at a time */ +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + struct qman_fq *eqci_owned; /* only 1 enqueue WAIT_SYNC at a time */ +#endif +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + raw_spinlock_t sharing_lock; /* only used if is_shared */ + int is_shared; + struct qman_portal *sharing_redirect; +#endif + u32 sdqcr; + int dqrr_disable_ref; + /* A portal-specific handler for DCP ERNs. If this is NULL, the global + * handler is called instead. 
*/ + qman_cb_dc_ern cb_dc_ern; + /* When the cpu-affine portal is activated, this is non-NULL */ + const struct qm_portal_config *config; + /* This is needed for providing a non-NULL device to dma_map_***() */ + struct platform_device *pdev; + struct dpa_rbtree retire_table; + char irqname[MAX_IRQNAME]; + /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */ + struct qman_cgrs *cgrs; + /* 256-element array, each is a linked-list of CSCN handlers. */ + struct list_head cgr_cbs[256]; + /* list lock */ + spinlock_t cgr_lock; +}; + +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE +#define PORTAL_IRQ_LOCK(p, irqflags) \ + do { \ + if ((p)->is_shared) \ + raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \ + else \ + local_irq_save(irqflags); \ + } while (0) +#define PORTAL_IRQ_UNLOCK(p, irqflags) \ + do { \ + if ((p)->is_shared) \ + raw_spin_unlock_irqrestore(&(p)->sharing_lock, \ + irqflags); \ + else \ + local_irq_restore(irqflags); \ + } while (0) +#else +#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags) +#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags) +#endif + +/* Global handler for DCP ERNs. Used when the portal receiving the message does + * not have a portal-specific handler. */ +static qman_cb_dc_ern cb_dc_ern; + +static cpumask_t affine_mask; +static DEFINE_SPINLOCK(affine_mask_lock); +static u16 affine_channels[NR_CPUS]; +static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal); +/* "raw" gets the cpu-local struct whether it's a redirect or not. */ +static inline struct qman_portal *get_raw_affine_portal(void) +{ + return &get_cpu_var(qman_affine_portal); +} +/* For ops that can redirect, this obtains the portal to use */ +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE +static inline struct qman_portal *get_affine_portal(void) +{ + struct qman_portal *p = get_raw_affine_portal(); + if (p->sharing_redirect) + return p->sharing_redirect; + return p; +} +#else +#define get_affine_portal() get_raw_affine_portal() +#endif +/* For every "get", there must be a "put" */ +static inline void put_affine_portal(void) +{ + put_cpu_var(qman_affine_portal); +} +/* Exception: poll functions assume the caller is cpu-affine and in no risk of + * re-entrance, which are the two reasons we usually use the get/put_cpu_var() + * semantic - ie. to disable pre-emption. Some use-cases expect the execution + * context to remain as non-atomic during poll-triggered callbacks as it was + * when the poll API was first called (eg. NAPI), so we go out of our way in + * this case to not disable pre-emption. */ +static inline struct qman_portal *get_poll_portal(void) +{ + return &__get_cpu_var(qman_affine_portal); +} +#define put_poll_portal() do { ; } while (0) + +/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux + * retirement notifications (the fact they are sometimes h/w-consumed means that + * contextB isn't always a s/w demux - and as we can't know which case it is + * when looking at the notification, we have to use the slow lookup for all of + * them). NB, it's possible to have multiple FQ objects refer to the same FQID + * (though at most one of them should be the consumer), so this table isn't for + * all FQs - FQs are added when retirement commands are issued, and removed when + * they complete, which also massively reduces the size of this table. */ +IMPLEMENT_DPA_RBTREE(fqtree, struct qman_fq, node, fqid); + +/* This is what everything can wait on, even if it migrates to a different cpu + * to the one whose affine portal it is waiting on. 
*/ +static DECLARE_WAIT_QUEUE_HEAD(affine_queue); + +static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq) +{ + int ret = fqtree_push(&p->retire_table, fq); + if (ret) + pr_err("ERROR: double FQ-retirement %d\n", fq->fqid); + return ret; +} + +static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq) +{ + fqtree_del(&p->retire_table, fq); +} + +static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid) +{ + return fqtree_find(&p->retire_table, fqid); +} + +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP +static void **qman_fq_lookup_table; +static size_t qman_fq_lookup_table_size; + +int qman_setup_fq_lookup_table(size_t num_entries) +{ + num_entries++; + /* Allocate 1 more entry since the first entry is not used */ + qman_fq_lookup_table = vmalloc((num_entries * sizeof(void *))); + if (!qman_fq_lookup_table) { + pr_err("QMan: Could not allocate fq lookup table\n"); + return -ENOMEM; + } + memset(qman_fq_lookup_table, 0, num_entries * sizeof(void *)); + qman_fq_lookup_table_size = num_entries; + pr_info("QMan: Allocated lookup table at %p, entry count %lu\n", + qman_fq_lookup_table, + (unsigned long)qman_fq_lookup_table_size); + return 0; +} + +/* global structure that maintains fq object mapping */ +static DEFINE_SPINLOCK(fq_hash_table_lock); + +static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq) +{ + u32 i; + + spin_lock(&fq_hash_table_lock); + /* Can't use index zero because this has special meaning + * in context_b field. */ + for (i = 1; i < qman_fq_lookup_table_size; i++) { + if (qman_fq_lookup_table[i] == NULL) { + *entry = i; + qman_fq_lookup_table[i] = fq; + spin_unlock(&fq_hash_table_lock); + return 0; + } + } + spin_unlock(&fq_hash_table_lock); + return -ENOMEM; +} + +static void clear_fq_table_entry(u32 entry) +{ + spin_lock(&fq_hash_table_lock); + BUG_ON(entry >= qman_fq_lookup_table_size); + qman_fq_lookup_table[entry] = NULL; + spin_unlock(&fq_hash_table_lock); +} + +static inline struct qman_fq *get_fq_table_entry(u32 entry) +{ + BUG_ON(entry >= qman_fq_lookup_table_size); + return qman_fq_lookup_table[entry]; +} +#endif + +/* In the case that slow- and fast-path handling are both done by qman_poll() + * (ie. because there is no interrupt handling), we ought to balance how often + * we do the fast-path poll versus the slow-path poll. We'll use two decrementer + * sources, so we call the fast poll 'n' times before calling the slow poll + * once. The idle decrementer constant is used when the last slow-poll detected + * no work to do, and the busy decrementer constant when the last slow-poll had + * work to do. */ +#define SLOW_POLL_IDLE 1000 +#define SLOW_POLL_BUSY 10 +static u32 __poll_portal_slow(struct qman_portal *p, u32 is); +static inline unsigned int __poll_portal_fast(struct qman_portal *p, + unsigned int poll_limit); + +/* Portal interrupt handler */ +static irqreturn_t portal_isr(__always_unused int irq, void *ptr) +{ + struct qman_portal *p = ptr; + /* The CSCI source is cleared inside __poll_portal_slow(), because + * it could race against a Query Congestion State command also given + * as part of the handling of this interrupt source. We mustn't + * clear it a second time in this top-level function. 
*/ + u32 clear = QM_DQAVAIL_MASK | (p->irq_sources & ~QM_PIRQ_CSCI); + u32 is = qm_isr_status_read(&p->p) & p->irq_sources; + /* DQRR-handling if it's interrupt-driven */ + if (is & QM_PIRQ_DQRI) + __poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT); + /* Handling of anything else that's interrupt-driven */ + clear |= __poll_portal_slow(p, is); + qm_isr_status_clear(&p->p, clear); + return IRQ_HANDLED; +} + +/* This inner version is used privately by qman_create_affine_portal(), as well + * as by the exported qman_stop_dequeues(). */ +static inline void qman_stop_dequeues_ex(struct qman_portal *p) +{ + unsigned long irqflags __maybe_unused; + PORTAL_IRQ_LOCK(p, irqflags); + if (!(p->dqrr_disable_ref++)) + qm_dqrr_set_maxfill(&p->p, 0); + PORTAL_IRQ_UNLOCK(p, irqflags); +} + +static int drain_mr_fqrni(struct qm_portal *p) +{ + const struct qm_mr_entry *msg; +loop: + msg = qm_mr_current(p); + if (!msg) { + /* if MR was full and h/w had other FQRNI entries to produce, we + * need to allow it time to produce those entries once the + * existing entries are consumed. A worst-case situation + * (fully-loaded system) means h/w sequencers may have to do 3-4 + * other things before servicing the portal's MR pump, each of + * which (if slow) may take ~50 qman cycles (which is ~200 + * processor cycles). So rounding up and then multiplying this + * worst-case estimate by a factor of 10, just to be + * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume + * one entry at a time, so h/w has an opportunity to produce new + * entries well before the ring has been fully consumed, so + * we're being *really* paranoid here. */ + u64 now, then = mfatb(); + do { + now = mfatb(); + } while ((then + 10000) > now); + msg = qm_mr_current(p); + if (!msg) + return 0; + } + if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) + /* We aren't draining anything but FQRNIs */ + return -1; + qm_mr_next(p); + qm_mr_cci_consume(p, 1); + goto loop; +} + +struct qman_portal *qman_create_affine_portal( + const struct qm_portal_config *config, + const struct qman_cgrs *cgrs) +{ + struct qman_portal *portal = get_raw_affine_portal(); + struct qm_portal *__p = &portal->p; + char buf[16]; + int ret; + u32 isdr; + + /* A criteria for calling this function (from qman_driver.c) is that + * we're already affine to the cpu and won't schedule onto another cpu. + * This means we can put_affine_portal() and yet continue to use + * "portal", which in turn means aspects of this routine can sleep. */ + put_affine_portal(); + /* prep the low-level portal struct with the mapped addresses from the + * config, everything that follows depends on it and "config" is more + * for (de)reference... 
*/ + __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE]; + __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI]; + if (qm_eqcr_init(__p, qm_eqcr_pvb, qm_eqcr_cce)) { + pr_err("Qman EQCR initialisation failed\n"); + goto fail_eqcr; + } + if (qm_dqrr_init(__p, config, qm_dqrr_dpush, qm_dqrr_pvb, + qm_dqrr_cdc, DQRR_MAXFILL)) { + pr_err("Qman DQRR initialisation failed\n"); + goto fail_dqrr; + } + if (qm_mr_init(__p, qm_mr_pvb, qm_mr_cci)) { + pr_err("Qman MR initialisation failed\n"); + goto fail_mr; + } + if (qm_mc_init(__p)) { + pr_err("Qman MC initialisation failed\n"); + goto fail_mc; + } + if (qm_isr_init(__p)) { + pr_err("Qman ISR initialisation failed\n"); + goto fail_isr; + } + /* static interrupt-gating controls */ + qm_dqrr_set_ithresh(__p, CONFIG_FSL_QMAN_PIRQ_DQRR_ITHRESH); + qm_mr_set_ithresh(__p, CONFIG_FSL_QMAN_PIRQ_MR_ITHRESH); + qm_isr_set_iperiod(__p, CONFIG_FSL_QMAN_PIRQ_IPERIOD); + portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL); + if (!portal->cgrs) + goto fail_cgrs; + /* initial snapshot is no-depletion */ + qman_cgrs_init(&portal->cgrs[1]); + if (cgrs) + portal->cgrs[0] = *cgrs; + else + /* if the given mask is NULL, assume all CGRs can be seen */ + qman_cgrs_fill(&portal->cgrs[0]); + for (ret = 0; ret < __CGR_NUM; ret++) + INIT_LIST_HEAD(&portal->cgr_cbs[ret]); + spin_lock_init(&portal->cgr_lock); + portal->bits = 0; + portal->slowpoll = 0; +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + portal->eqci_owned = NULL; +#endif +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + raw_spin_lock_init(&portal->sharing_lock); + portal->is_shared = config->public_cfg.is_shared; + portal->sharing_redirect = NULL; +#endif + portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 | + QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS | + QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED; + portal->dqrr_disable_ref = 0; + portal->cb_dc_ern = NULL; + sprintf(buf, "qportal-%d", config->public_cfg.channel); + portal->pdev = platform_device_alloc(buf, -1); + if (!portal->pdev) + goto fail_devalloc; + if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40))) + goto fail_devadd; + ret = platform_device_add(portal->pdev); + if (ret) + goto fail_devadd; + dpa_rbtree_init(&portal->retire_table); + isdr = 0xffffffff; + qm_isr_disable_write(__p, isdr); + portal->irq_sources = 0; + qm_isr_enable_write(__p, portal->irq_sources); + qm_isr_status_clear(__p, 0xffffffff); + snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu); + if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname, + portal)) { + pr_err("request_irq() failed\n"); + goto fail_irq; + } + if ((config->public_cfg.cpu != -1) && + irq_can_set_affinity(config->public_cfg.irq) && + irq_set_affinity(config->public_cfg.irq, + cpumask_of(config->public_cfg.cpu))) { + pr_err("irq_set_affinity() failed\n"); + goto fail_affinity; + } + /* Need EQCR to be empty before continuing */ + isdr ^= QM_PIRQ_EQCI; + qm_isr_disable_write(__p, isdr); + ret = qm_eqcr_get_fill(__p); + if (ret) { + pr_err("Qman EQCR unclean\n"); + goto fail_eqcr_empty; + } + isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI); + qm_isr_disable_write(__p, isdr); + if (qm_dqrr_current(__p) != NULL) { + pr_err("Qman DQRR unclean\n"); + goto fail_dqrr_mr_empty; + } + if (qm_mr_current(__p) != NULL) { + /* special handling, drain just in case it's a few FQRNIs */ + if (drain_mr_fqrni(__p)) { + pr_err("Qman MR unclean\n"); + goto fail_dqrr_mr_empty; + } + } + /* Success */ + portal->config = config; + spin_lock(&affine_mask_lock); + 
cpumask_set_cpu(config->public_cfg.cpu, &affine_mask); + affine_channels[config->public_cfg.cpu] = config->public_cfg.channel; + spin_unlock(&affine_mask_lock); + qm_isr_disable_write(__p, 0); + qm_isr_uninhibit(__p); + /* Write a sane SDQCR */ + qm_dqrr_sdqcr_set(__p, portal->sdqcr); + return portal; +fail_dqrr_mr_empty: +fail_eqcr_empty: +fail_affinity: + free_irq(config->public_cfg.irq, portal); +fail_irq: + platform_device_del(portal->pdev); +fail_devadd: + platform_device_put(portal->pdev); +fail_devalloc: + if (portal->cgrs) + kfree(portal->cgrs); +fail_cgrs: + qm_isr_finish(__p); +fail_isr: + qm_mc_finish(__p); +fail_mc: + qm_mr_finish(__p); +fail_mr: + qm_dqrr_finish(__p); +fail_dqrr: + qm_eqcr_finish(__p); +fail_eqcr: + return NULL; +} + +/* These checks are BUG_ON()s because the driver is already supposed to avoid + * these cases. */ +struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect) +{ +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + struct qman_portal *p = get_raw_affine_portal(); + /* Check that we don't already have our own portal */ + BUG_ON(p->config); + /* Check that we aren't already slaving to another portal */ + BUG_ON(p->is_shared); + /* Check that 'redirect' is prepared to have us */ + BUG_ON(!redirect->config->public_cfg.is_shared); + /* These are the only elements to initialise when redirecting */ + p->irq_sources = 0; + p->sharing_redirect = redirect; + put_affine_portal(); + return p; +#else + BUG(); + return NULL; +#endif +} + +const struct qm_portal_config *qman_destroy_affine_portal(void) +{ + /* We don't want to redirect if we're a slave, use "raw" */ + struct qman_portal *qm = get_raw_affine_portal(); + const struct qm_portal_config *pcfg; + int cpu; +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (qm->sharing_redirect) { + qm->sharing_redirect = NULL; + put_affine_portal(); + return NULL; + } + qm->is_shared = 0; +#endif + pcfg = qm->config; + cpu = pcfg->public_cfg.cpu; + /* NB we do this to "quiesce" EQCR. If we add enqueue-completions or + * something related to QM_PIRQ_EQCI, this may need fixing. + * Also, due to the prefetching model used for CI updates in the enqueue + * path, this update will only invalidate the CI cacheline *after* + * working on it, so we need to call this twice to ensure a full update + * irrespective of where the enqueue processing was at when the teardown + * began. 
*/ + qm_eqcr_cce_update(&qm->p); + qm_eqcr_cce_update(&qm->p); + free_irq(pcfg->public_cfg.irq, qm); + kfree(qm->cgrs); + qm_isr_finish(&qm->p); + qm_mc_finish(&qm->p); + qm_mr_finish(&qm->p); + qm_dqrr_finish(&qm->p); + qm_eqcr_finish(&qm->p); + qm->config = NULL; + spin_lock(&affine_mask_lock); + cpumask_clear_cpu(cpu, &affine_mask); + spin_unlock(&affine_mask_lock); + put_affine_portal(); + return pcfg; +} + +const struct qman_portal_config *qman_get_portal_config(void) +{ + struct qman_portal *p = get_affine_portal(); + const struct qman_portal_config *ret = &p->config->public_cfg; + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_get_portal_config); + +/* Inline helper to reduce nesting in __poll_portal_slow() */ +static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq, + const struct qm_mr_entry *msg, u8 verb) +{ + FQLOCK(fq); + switch(verb) { + case QM_MR_VERB_FQRL: + DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL)); + fq_clear(fq, QMAN_FQ_STATE_ORL); + table_del_fq(p, fq); + break; + case QM_MR_VERB_FQRN: + DPA_ASSERT((fq->state == qman_fq_state_parked) || + (fq->state == qman_fq_state_sched)); + DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING)); + fq_clear(fq, QMAN_FQ_STATE_CHANGING); + if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY) + fq_set(fq, QMAN_FQ_STATE_NE); + if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT) + fq_set(fq, QMAN_FQ_STATE_ORL); + else + table_del_fq(p, fq); + fq->state = qman_fq_state_retired; + break; + case QM_MR_VERB_FQPN: + DPA_ASSERT(fq->state == qman_fq_state_sched); + DPA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING)); + fq->state = qman_fq_state_parked; + } + FQUNLOCK(fq); +} + +static u32 __poll_portal_slow(struct qman_portal *p, u32 is) +{ + const struct qm_mr_entry *msg; + + if (is & QM_PIRQ_CSCI) { + struct qman_cgrs rr, c; + struct qm_mc_result *mcr; + struct qman_cgr *cgr; + int i; + unsigned long irqflags __maybe_unused; + + spin_lock_irqsave(&p->cgr_lock, irqflags); + /* The CSCI bit must be cleared _before_ issuing the + * Query Congestion State command, to ensure that a long + * CGR State Change callback cannot miss an intervening + * state change. 
 */
+		qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
+
+		qm_mc_start(&p->p);
+		qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+		while (!(mcr = qm_mc_result(&p->p)))
+			cpu_relax();
+		/* mask out the ones I'm not interested in */
+		qman_cgrs_and(&rr, (const struct qman_cgrs *)
+			&mcr->querycongestion.state, &p->cgrs[0]);
+		/* check previous snapshot for delta, enter/exit congestion */
+		qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+		/* update snapshot */
+		qman_cgrs_cp(&p->cgrs[1], &rr);
+		/* Invoke callback */
+		qman_cgrs_for_each_1(i, &c)
+			list_for_each_entry(cgr, &p->cgr_cbs[i], node) {
+				if (cgr->cb)
+					cgr->cb(p, cgr, qman_cgrs_get(&rr, i));
+			}
+		spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+	}
+
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+	if (is & QM_PIRQ_EQCI) {
+		unsigned long irqflags;
+		PORTAL_IRQ_LOCK(p, irqflags);
+		p->eqci_owned = NULL;
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		wake_up(&affine_queue);
+	}
+#endif
+
+	if (is & QM_PIRQ_EQRI) {
+		unsigned long irqflags __maybe_unused;
+		PORTAL_IRQ_LOCK(p, irqflags);
+		qm_eqcr_cce_update(&p->p);
+		qm_eqcr_set_ithresh(&p->p, 0);
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		wake_up(&affine_queue);
+	}
+
+	if (is & QM_PIRQ_MRI) {
+		struct qman_fq *fq;
+		u8 verb, num = 0;
+mr_loop:
+		qm_mr_pvb_update(&p->p);
+		msg = qm_mr_current(&p->p);
+		if (!msg)
+			goto mr_done;
+		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+		/* The message is a software ERN iff the 0x20 bit is clear */
+		if (verb & 0x20) {
+			switch (verb) {
+			case QM_MR_VERB_FQRNI:
+				/* nada, we drop FQRNIs on the floor */
+				break;
+			case QM_MR_VERB_FQRN:
+			case QM_MR_VERB_FQRL:
+				/* Lookup in the retirement table */
+				fq = table_find_fq(p, msg->fq.fqid);
+				BUG_ON(!fq);
+				fq_state_change(p, fq, msg, verb);
+				if (fq->cb.fqs)
+					fq->cb.fqs(p, fq, msg);
+				break;
+			case QM_MR_VERB_FQPN:
+				/* Parked */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+				fq = get_fq_table_entry(msg->fq.contextB);
+#else
+				fq = (void *)(uintptr_t)msg->fq.contextB;
+#endif
+				fq_state_change(p, fq, msg, verb);
+				if (fq->cb.fqs)
+					fq->cb.fqs(p, fq, msg);
+				break;
+			case QM_MR_VERB_DC_ERN:
+				/* DCP ERN */
+				if (p->cb_dc_ern)
+					p->cb_dc_ern(p, msg);
+				else if (cb_dc_ern)
+					cb_dc_ern(p, msg);
+				else {
+					static int warn_once;
+					if (!warn_once) {
+						pr_crit("Leaking DCP ERNs!\n");
+						warn_once = 1;
+					}
+				}
+				break;
+			default:
+				pr_crit("Invalid MR verb 0x%02x\n", verb);
+			}
+		} else {
+			/* It's a software ERN */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+			fq = get_fq_table_entry(msg->ern.tag);
+#else
+			fq = (void *)(uintptr_t)msg->ern.tag;
+#endif
+			fq->cb.ern(p, fq, msg);
+		}
+		num++;
+		qm_mr_next(&p->p);
+		goto mr_loop;
+mr_done:
+		qm_mr_cci_consume(&p->p, num);
+	}
+
+	/* QM_PIRQ_CSCI has already been cleared, as part of its specific
+	 * processing. If that interrupt source has meanwhile been re-asserted,
+	 * we mustn't clear it here (or in the top-level interrupt handler). */
+	return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
+}
+
+/* remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined. */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+	p->vdqcr_owned = NULL;
+	FQLOCK(fq);
+	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+	FQUNLOCK(fq);
+	wake_up(&affine_queue);
+}
+
+/* Look: no locks, no irq_save()s, no preempt_disable()s! :-) The only states
+ * that would conflict with other things if they ran at the same time on the
+ * same cpu are;
+ *
+ * (i) setting/clearing vdqcr_owned, and
+ * (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe.
Because; + * + * (i) this clearing can only occur after qman_volatile_dequeue() has set the + * vdqcr_owned field (which it does before setting VDQCR), and + * qman_volatile_dequeue() blocks interrupts and preemption while this is + * done so that we can't interfere. + * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as + * with (i) that API prevents us from interfering until it's safe. + * + * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far + * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett + * advantage comes from this function not having to "lock" anything at all. + * + * Note also that the callbacks are invoked at points which are safe against the + * above potential conflicts, but that this function itself is not re-entrant + * (this is because the function tracks one end of each FIFO in the portal and + * we do *not* want to lock that). So the consequence is that it is safe for + * user callbacks to call into any Qman API *except* qman_poll() (as that's the + * sole API that could be invoking the callback through this function). + */ +static inline unsigned int __poll_portal_fast(struct qman_portal *p, + unsigned int poll_limit) +{ + const struct qm_dqrr_entry *dq; + struct qman_fq *fq; + enum qman_cb_dqrr_result res; + unsigned int limit = 0; + +loop: + qm_dqrr_pvb_update(&p->p); + dq = qm_dqrr_current(&p->p); + if (!dq) + goto done; + if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) { + /* VDQCR: don't trust contextB as the FQ may have been + * configured for h/w consumption and we're draining it + * post-retirement. */ + fq = p->vdqcr_owned; + /* We only set QMAN_FQ_STATE_NE when retiring, so we only need + * to check for clearing it when doing volatile dequeues. It's + * one less thing to check in the critical path (SDQCR). */ + if (dq->stat & QM_DQRR_STAT_FQ_EMPTY) + fq_clear(fq, QMAN_FQ_STATE_NE); + /* this is duplicated from the SDQCR code, but we have stuff to + * do before *and* after this callback, and we don't want + * multiple if()s in the critical path (SDQCR). */ + res = fq->cb.dqrr(p, fq, dq); + if (res == qman_cb_dqrr_stop) + goto done; + /* Check for VDQCR completion */ + if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) + clear_vdqcr(p, fq); + } else { + /* SDQCR: contextB points to the FQ */ +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + fq = get_fq_table_entry(dq->contextB); +#else + fq = (void *)(uintptr_t)dq->contextB; +#endif + /* Now let the callback do its stuff */ + res = fq->cb.dqrr(p, fq, dq); + /* The callback can request that we exit without consuming this + * entry nor advancing; */ + if (res == qman_cb_dqrr_stop) + goto done; + } + /* Interpret 'dq' from a driver perspective. */ + /* Parking isn't possible unless HELDACTIVE was set. NB, + * FORCEELIGIBLE implies HELDACTIVE, so we only need to + * check for HELDACTIVE to cover both. */ + DPA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) || + (res != qman_cb_dqrr_park)); + /* Defer just means "skip it, I'll consume it myself later on" */ + if (res != qman_cb_dqrr_defer) + qm_dqrr_cdc_consume_1ptr(&p->p, dq, (res == qman_cb_dqrr_park)); + /* Move forward */ + qm_dqrr_next(&p->p); + /* Entry processed and consumed, increment our counter. The callback can + * request that we exit after consuming the entry, and we also exit if + * we reach our processing limit, so loop back only if neither of these + * conditions is met. 
*/ + if ((++limit < poll_limit) && (res != qman_cb_dqrr_consume_stop)) + goto loop; +done: + return limit; +} + +u32 qman_irqsource_get(void) +{ + /* "irqsource" and "poll" APIs mustn't redirect when sharing, they + * should shut the user out if they are not the primary CPU hosting the + * portal. That's why we use the "raw" interface. */ + struct qman_portal *p = get_raw_affine_portal(); + u32 ret = p->irq_sources & QM_PIRQ_VISIBLE; + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_irqsource_get); + +int qman_irqsource_add(u32 bits __maybe_unused) +{ + struct qman_portal *p = get_raw_affine_portal(); + int ret = 0; +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (p->sharing_redirect) + ret = -EINVAL; + else +#endif + { + __maybe_unused unsigned long irqflags; + PORTAL_IRQ_LOCK(p, irqflags); + set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources); + qm_isr_enable_write(&p->p, p->irq_sources); + PORTAL_IRQ_UNLOCK(p, irqflags); + } + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_irqsource_add); + +int qman_irqsource_remove(u32 bits) +{ + struct qman_portal *p = get_raw_affine_portal(); + __maybe_unused unsigned long irqflags; + u32 ier; +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (p->sharing_redirect) { + put_affine_portal(); + return -EINVAL; + } +#endif + /* Our interrupt handler only processes+clears status register bits that + * are in p->irq_sources. As we're trimming that mask, if one of them + * were to assert in the status register just before we remove it from + * the enable register, there would be an interrupt-storm when we + * release the IRQ lock. So we wait for the enable register update to + * take effect in h/w (by reading it back) and then clear all other bits + * in the status register. Ie. we clear them from ISR once it's certain + * IER won't allow them to reassert. */ + PORTAL_IRQ_LOCK(p, irqflags); + bits &= QM_PIRQ_VISIBLE; + clear_bits(bits, &p->irq_sources); + qm_isr_enable_write(&p->p, p->irq_sources); + ier = qm_isr_enable_read(&p->p); + /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a + * data-dependency, ie. to protect against re-ordering. 
*/ + qm_isr_status_clear(&p->p, ~ier); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return 0; +} +EXPORT_SYMBOL(qman_irqsource_remove); + +const cpumask_t *qman_affine_cpus(void) +{ + return &affine_mask; +} +EXPORT_SYMBOL(qman_affine_cpus); + +u16 qman_affine_channel(int cpu) +{ + if (cpu < 0) { + struct qman_portal *portal = get_raw_affine_portal(); +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + BUG_ON(portal->sharing_redirect); +#endif + cpu = portal->config->public_cfg.cpu; + put_affine_portal(); + } + BUG_ON(!cpumask_test_cpu(cpu, &affine_mask)); + return affine_channels[cpu]; +} +EXPORT_SYMBOL(qman_affine_channel); + +int qman_poll_dqrr(unsigned int limit) +{ + struct qman_portal *p = get_poll_portal(); + int ret; +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (unlikely(p->sharing_redirect)) + ret = -EINVAL; + else +#endif + { + BUG_ON(p->irq_sources & QM_PIRQ_DQRI); + ret = __poll_portal_fast(p, limit); + } + put_poll_portal(); + return ret; +} +EXPORT_SYMBOL(qman_poll_dqrr); + +u32 qman_poll_slow(void) +{ + struct qman_portal *p = get_poll_portal(); + u32 ret; +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (unlikely(p->sharing_redirect)) + ret = (u32)-1; + else +#endif + { + u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources; + ret = __poll_portal_slow(p, is); + qm_isr_status_clear(&p->p, ret); + } + put_poll_portal(); + return ret; +} +EXPORT_SYMBOL(qman_poll_slow); + +/* Legacy wrapper */ +void qman_poll(void) +{ + struct qman_portal *p = get_poll_portal(); +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (unlikely(p->sharing_redirect)) + goto done; +#endif + if ((~p->irq_sources) & QM_PIRQ_SLOW) { + if (!(p->slowpoll--)) { + u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources; + u32 active = __poll_portal_slow(p, is); + if (active) { + qm_isr_status_clear(&p->p, active); + p->slowpoll = SLOW_POLL_BUSY; + } else + p->slowpoll = SLOW_POLL_IDLE; + } + } + if ((~p->irq_sources) & QM_PIRQ_DQRI) + __poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT); +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE +done: +#endif + put_poll_portal(); +} +EXPORT_SYMBOL(qman_poll); + +void qman_stop_dequeues(void) +{ + struct qman_portal *p = get_affine_portal(); + qman_stop_dequeues_ex(p); + put_affine_portal(); +} +EXPORT_SYMBOL(qman_stop_dequeues); + +void qman_start_dequeues(void) +{ + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + PORTAL_IRQ_LOCK(p, irqflags); + DPA_ASSERT(p->dqrr_disable_ref > 0); + if (!(--p->dqrr_disable_ref)) + qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); +} +EXPORT_SYMBOL(qman_start_dequeues); + +void qman_static_dequeue_add(u32 pools) +{ + unsigned long irqflags __maybe_unused; + struct qman_portal *p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + pools &= p->config->public_cfg.pools; + p->sdqcr |= pools; + qm_dqrr_sdqcr_set(&p->p, p->sdqcr); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); +} +EXPORT_SYMBOL(qman_static_dequeue_add); + +void qman_static_dequeue_del(u32 pools) +{ + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + PORTAL_IRQ_LOCK(p, irqflags); + pools &= p->config->public_cfg.pools; + p->sdqcr &= ~pools; + qm_dqrr_sdqcr_set(&p->p, p->sdqcr); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); +} +EXPORT_SYMBOL(qman_static_dequeue_del); + +u32 qman_static_dequeue_get(void) +{ + struct qman_portal *p = get_affine_portal(); + u32 ret = p->sdqcr; + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_static_dequeue_get); + +void 
qman_dca(struct qm_dqrr_entry *dq, int park_request) +{ + struct qman_portal *p = get_affine_portal(); + qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request); + put_affine_portal(); +} +EXPORT_SYMBOL(qman_dca); + +/*******************/ +/* Frame queue API */ +/*******************/ + +static const char *mcr_result_str(u8 result) +{ + switch (result) { + case QM_MCR_RESULT_NULL: + return "QM_MCR_RESULT_NULL"; + case QM_MCR_RESULT_OK: + return "QM_MCR_RESULT_OK"; + case QM_MCR_RESULT_ERR_FQID: + return "QM_MCR_RESULT_ERR_FQID"; + case QM_MCR_RESULT_ERR_FQSTATE: + return "QM_MCR_RESULT_ERR_FQSTATE"; + case QM_MCR_RESULT_ERR_NOTEMPTY: + return "QM_MCR_RESULT_ERR_NOTEMPTY"; + case QM_MCR_RESULT_PENDING: + return "QM_MCR_RESULT_PENDING"; + case QM_MCR_RESULT_ERR_BADCOMMAND: + return "QM_MCR_RESULT_ERR_BADCOMMAND"; + } + return ""; +} + +int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq) +{ + struct qm_fqd fqd; + struct qm_mcr_queryfq_np np; + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + + if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) { + int ret = qman_alloc_fqid(&fqid); + if (ret) + return ret; + } + spin_lock_init(&fq->fqlock); + fq->fqid = fqid; + fq->flags = flags; + fq->state = qman_fq_state_oos; + fq->cgr_groupid = 0; +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) + return -ENOMEM; +#endif + if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY)) + return 0; + /* Everything else is AS_IS support */ + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + mcc = qm_mc_start(&p->p); + mcc->queryfq.fqid = fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ); + if (mcr->result != QM_MCR_RESULT_OK) { + pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result)); + goto err; + } + fqd = mcr->queryfq.fqd; + mcc = qm_mc_start(&p->p); + mcc->queryfq_np.fqid = fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP); + if (mcr->result != QM_MCR_RESULT_OK) { + pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result)); + goto err; + } + np = mcr->queryfq_np; + /* Phew, have queryfq and queryfq_np results, stitch together + * the FQ object from those. */ + fq->cgr_groupid = fqd.cgid; + switch (np.state & QM_MCR_NP_STATE_MASK) { + case QM_MCR_NP_STATE_OOS: + break; + case QM_MCR_NP_STATE_RETIRED: + fq->state = qman_fq_state_retired; + if (np.frm_cnt) + fq_set(fq, QMAN_FQ_STATE_NE); + break; + case QM_MCR_NP_STATE_TEN_SCHED: + case QM_MCR_NP_STATE_TRU_SCHED: + case QM_MCR_NP_STATE_ACTIVE: + fq->state = qman_fq_state_sched; + if (np.state & QM_MCR_NP_STATE_R) + fq_set(fq, QMAN_FQ_STATE_CHANGING); + break; + case QM_MCR_NP_STATE_PARKED: + fq->state = qman_fq_state_parked; + break; + default: + DPA_ASSERT(NULL == "invalid FQ state"); + } + if (fqd.fq_ctrl & QM_FQCTRL_CGE) + fq->state |= QMAN_FQ_STATE_CGR_EN; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return 0; +err: + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) + qman_release_fqid(fqid); + return -EIO; +} +EXPORT_SYMBOL(qman_create_fq); + +void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused) +{ + /* We don't need to lock the FQ as it is a pre-condition that the FQ be + * quiesced. 
Instead, run some checks. */ + switch (fq->state) { + case qman_fq_state_parked: + DPA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED); + case qman_fq_state_oos: + if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) + qman_release_fqid(fq->fqid); +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + clear_fq_table_entry(fq->key); +#endif + return; + default: + break; + } + DPA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!"); +} +EXPORT_SYMBOL(qman_destroy_fq); + +u32 qman_fq_fqid(struct qman_fq *fq) +{ + return fq->fqid; +} +EXPORT_SYMBOL(qman_fq_fqid); + +void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags) +{ + if (state) + *state = fq->state; + if (flags) + *flags = fq->flags; +} +EXPORT_SYMBOL(qman_fq_state); + +int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ? + QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED; + + if ((fq->state != qman_fq_state_oos) && + (fq->state != qman_fq_state_parked)) + return -EINVAL; +#ifdef CONFIG_FSL_DPA_CHECKING + if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) + return -EINVAL; +#endif + if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) { + /* OAC not supported on rev1.0 */ + if (unlikely(qman_ip_rev == QMAN_REV10)) + return -EINVAL; + /* And can't be set at the same time as TDTHRESH */ + if (opts->we_mask & QM_INITFQ_WE_TDTHRESH) + return -EINVAL; + } + /* Issue an INITFQ_[PARKED|SCHED] management command */ + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + FQLOCK(fq); + if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || + ((fq->state != qman_fq_state_oos) && + (fq->state != qman_fq_state_parked)))) { + FQUNLOCK(fq); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return -EBUSY; + } + mcc = qm_mc_start(&p->p); + if (opts) + mcc->initfq = *opts; + mcc->initfq.fqid = fq->fqid; + mcc->initfq.count = 0; + /* If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a + * demux pointer. Otherwise, the caller-provided value is allowed to + * stand, don't overwrite it. */ + if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) { + dma_addr_t phys_fq; + mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB; +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + mcc->initfq.fqd.context_b = fq->key; +#else + mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq; +#endif + /* and the physical address - NB, if the user wasn't trying to + * set CONTEXTA, clear the stashing settings. 
*/ + if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) { + mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; + memset(&mcc->initfq.fqd.context_a, 0, + sizeof(mcc->initfq.fqd.context_a)); + } else { + phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq), + DMA_TO_DEVICE); + qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq); + } + } + if (flags & QMAN_INITFQ_FLAG_LOCAL) { + mcc->initfq.fqd.dest.channel = p->config->public_cfg.channel; + if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) { + mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ; + mcc->initfq.fqd.dest.wq = 4; + } + } + qm_mc_commit(&p->p, myverb); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); + res = mcr->result; + if (res != QM_MCR_RESULT_OK) { + FQUNLOCK(fq); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return -EIO; + } + if (opts) { + if (opts->we_mask & QM_INITFQ_WE_FQCTRL) { + if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE) + fq_set(fq, QMAN_FQ_STATE_CGR_EN); + else + fq_clear(fq, QMAN_FQ_STATE_CGR_EN); + } + if (opts->we_mask & QM_INITFQ_WE_CGID) + fq->cgr_groupid = opts->fqd.cgid; + } + fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? + qman_fq_state_sched : qman_fq_state_parked; + FQUNLOCK(fq); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return 0; +} +EXPORT_SYMBOL(qman_init_fq); + +int qman_schedule_fq(struct qman_fq *fq) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + int ret = 0; + u8 res; + + if (fq->state != qman_fq_state_parked) + return -EINVAL; +#ifdef CONFIG_FSL_DPA_CHECKING + if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) + return -EINVAL; +#endif + /* Issue a ALTERFQ_SCHED management command */ + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + FQLOCK(fq); + if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || + (fq->state != qman_fq_state_parked))) { + ret = -EBUSY; + goto out; + } + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED); + res = mcr->result; + if (res != QM_MCR_RESULT_OK) { + ret = -EIO; + goto out; + } + fq->state = qman_fq_state_sched; +out: + FQUNLOCK(fq); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_schedule_fq); + +int qman_retire_fq(struct qman_fq *fq, u32 *flags) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + int rval; + u8 res; + + if ((fq->state != qman_fq_state_parked) && + (fq->state != qman_fq_state_sched)) + return -EINVAL; +#ifdef CONFIG_FSL_DPA_CHECKING + if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) + return -EINVAL; +#endif + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + FQLOCK(fq); + if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || + (fq->state == qman_fq_state_retired) || + (fq->state == qman_fq_state_oos))) { + rval = -EBUSY; + goto out; + } + rval = table_push_fq(p, fq); + if (rval) + goto out; + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE); + res = mcr->result; + /* "Elegant" would be to treat OK/PENDING the same way; set CHANGING, + * and defer the flags until FQRNI or FQRN (respectively) 
show up. But + * "Friendly" is to process OK immediately, and not set CHANGING. We do + * friendly, otherwise the caller doesn't necessarily have a fully + * "retired" FQ on return even if the retirement was immediate. However + * this does mean some code duplication between here and + * fq_state_change(). */ + if (likely(res == QM_MCR_RESULT_OK)) { + rval = 0; + /* Process 'fq' right away, we'll ignore FQRNI */ + if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) + fq_set(fq, QMAN_FQ_STATE_NE); + if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT) + fq_set(fq, QMAN_FQ_STATE_ORL); + else + table_del_fq(p, fq); + if (flags) + *flags = fq->flags; + fq->state = qman_fq_state_retired; + if (fq->cb.fqs) { + /* Another issue with supporting "immediate" retirement + * is that we're forced to drop FQRNIs, because by the + * time they're seen it may already be "too late" (the + * fq may have been OOS'd and free()'d already). But if + * the upper layer wants a callback whether it's + * immediate or not, we have to fake a "MR" entry to + * look like an FQRNI... */ + struct qm_mr_entry msg; + msg.verb = QM_MR_VERB_FQRNI; + msg.fq.fqs = mcr->alterfq.fqs; + msg.fq.fqid = fq->fqid; +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + msg.fq.contextB = fq->key; +#else + msg.fq.contextB = (u32)(uintptr_t)fq; +#endif + fq->cb.fqs(p, fq, &msg); + } + } else if (res == QM_MCR_RESULT_PENDING) { + rval = 1; + fq_set(fq, QMAN_FQ_STATE_CHANGING); + } else { + rval = -EIO; + table_del_fq(p, fq); + } +out: + FQUNLOCK(fq); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return rval; +} +EXPORT_SYMBOL(qman_retire_fq); + +int qman_oos_fq(struct qman_fq *fq) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + int ret = 0; + u8 res; + + if (fq->state != qman_fq_state_retired) + return -EINVAL; +#ifdef CONFIG_FSL_DPA_CHECKING + if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) + return -EINVAL; +#endif + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + FQLOCK(fq); + if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) || + (fq->state != qman_fq_state_retired))) { + ret = -EBUSY; + goto out; + } + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS); + res = mcr->result; + if (res != QM_MCR_RESULT_OK) { + ret = -EIO; + goto out; + } + fq->state = qman_fq_state_oos; +out: + FQUNLOCK(fq); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_oos_fq); + +int qman_fq_flow_control(struct qman_fq *fq, int xon) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + int ret = 0; + u8 res; + u8 myverb; + + if ((fq->state == qman_fq_state_oos) || + (fq->state == qman_fq_state_retired) || + (fq->state == qman_fq_state_parked)) + return -EINVAL; + +#ifdef CONFIG_FSL_DPA_CHECKING + if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) + return -EINVAL; +#endif + /* Issue a ALTER_FQXON or ALTER_FQXOFF management command */ + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + FQLOCK(fq); + if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || + (fq->state == qman_fq_state_parked) || + (fq->state == qman_fq_state_oos) || + (fq->state == qman_fq_state_retired))) { + ret = -EBUSY; + goto out; + } + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fq->fqid; + mcc->alterfq.count = 0; + 
myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF; + + qm_mc_commit(&p->p, myverb); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); + + res = mcr->result; + if (res != QM_MCR_RESULT_OK) { + ret = -EIO; + goto out; + } +out: + FQUNLOCK(fq); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_fq_flow_control); + +int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + u8 res; + + PORTAL_IRQ_LOCK(p, irqflags); + mcc = qm_mc_start(&p->p); + mcc->queryfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); + res = mcr->result; + if (res == QM_MCR_RESULT_OK) + *fqd = mcr->queryfq.fqd; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + if (res != QM_MCR_RESULT_OK) + return -EIO; + return 0; +} +EXPORT_SYMBOL(qman_query_fq); + +int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + u8 res; + + PORTAL_IRQ_LOCK(p, irqflags); + mcc = qm_mc_start(&p->p); + mcc->queryfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); + res = mcr->result; + if (res == QM_MCR_RESULT_OK) + *np = mcr->queryfq_np; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + if (res != QM_MCR_RESULT_OK) + return -EIO; + return 0; +} +EXPORT_SYMBOL(qman_query_fq_np); + +int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + u8 res, myverb; + + PORTAL_IRQ_LOCK(p, irqflags); + myverb = (query_dedicated) ? 
QM_MCR_VERB_QUERYWQ_DEDICATED : + QM_MCR_VERB_QUERYWQ; + mcc = qm_mc_start(&p->p); + mcc->querywq.channel.id = wq->channel.id; + qm_mc_commit(&p->p, myverb); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); + res = mcr->result; + if (res == QM_MCR_RESULT_OK) + *wq = mcr->querywq; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + if (res != QM_MCR_RESULT_OK) { + pr_err("QUERYWQ failed: %s\n", mcr_result_str(res)); + return -EIO; + } + return 0; +} +EXPORT_SYMBOL(qman_query_wq); + +int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt, + struct qm_mcr_cgrtestwrite *result) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + u8 res; + + PORTAL_IRQ_LOCK(p, irqflags); + mcc = qm_mc_start(&p->p); + mcc->cgrtestwrite.cgid = cgr->cgrid; + mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32); + mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt; + qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE); + res = mcr->result; + if (res == QM_MCR_RESULT_OK) + *result = mcr->cgrtestwrite; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + if (res != QM_MCR_RESULT_OK) { + pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res)); + return -EIO; + } + return 0; +} +EXPORT_SYMBOL(qman_testwrite_cgr); + +int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + u8 res; + + PORTAL_IRQ_LOCK(p, irqflags); + mcc = qm_mc_start(&p->p); + mcc->querycgr.cgid = cgr->cgrid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR); + res = mcr->result; + if (res == QM_MCR_RESULT_OK) + *cgrd = mcr->querycgr; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + if (res != QM_MCR_RESULT_OK) { + pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res)); + return -EIO; + } + return 0; +} +EXPORT_SYMBOL(qman_query_cgr); + +int qman_query_congestion(struct qm_mcr_querycongestion *congestion) +{ + struct qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + u8 res; + + PORTAL_IRQ_LOCK(p, irqflags); + qm_mc_start(&p->p); + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == + QM_MCC_VERB_QUERYCONGESTION); + res = mcr->result; + if (res == QM_MCR_RESULT_OK) + *congestion = mcr->querycongestion; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + if (res != QM_MCR_RESULT_OK) { + pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res)); + return -EIO; + } + return 0; +} +EXPORT_SYMBOL(qman_query_congestion); + +/* internal function used as a wait_event() expression */ +static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr) +{ + unsigned long irqflags __maybe_unused; + int ret = -EBUSY; + *p = get_affine_portal(); + PORTAL_IRQ_LOCK(*p, irqflags); + if (!(*p)->vdqcr_owned) { + FQLOCK(fq); + if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) + goto escape; + fq_set(fq, QMAN_FQ_STATE_VDQCR); + FQUNLOCK(fq); + (*p)->vdqcr_owned = fq; + ret = 0; + } +escape: + PORTAL_IRQ_UNLOCK(*p, irqflags); + if (!ret) + 
qm_dqrr_vdqcr_set(&(*p)->p, vdqcr); + put_affine_portal(); + return ret; +} + +#ifdef CONFIG_FSL_DPA_CAN_WAIT +static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq, + u32 vdqcr, u32 flags) +{ + int ret = 0; + if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) + ret = wait_event_interruptible(affine_queue, + !(ret = set_vdqcr(p, fq, vdqcr))); + else + wait_event(affine_queue, !(ret = set_vdqcr(p, fq, vdqcr))); + return ret; +} +#endif + +int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused, + u32 vdqcr) +{ + struct qman_portal *p; + int ret; + + if ((fq->state != qman_fq_state_parked) && + (fq->state != qman_fq_state_retired)) + return -EINVAL; + if (vdqcr & QM_VDQCR_FQID_MASK) + return -EINVAL; + if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) + return -EBUSY; + vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid; +#ifdef CONFIG_FSL_DPA_CAN_WAIT + if (flags & QMAN_VOLATILE_FLAG_WAIT) + ret = wait_vdqcr_start(&p, fq, vdqcr, flags); + else +#endif + ret = set_vdqcr(&p, fq, vdqcr); + if (ret) + return ret; + /* VDQCR is set */ +#ifdef CONFIG_FSL_DPA_CAN_WAIT + if (flags & QMAN_VOLATILE_FLAG_FINISH) { + if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) + /* NB: don't propagate any error - the caller wouldn't + * know whether the VDQCR was issued or not. A signal + * could arrive after returning anyway, so the caller + * can check signal_pending() if that's an issue. */ + wait_event_interruptible(affine_queue, + !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); + else + wait_event(affine_queue, + !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); + } +#endif + return 0; +} +EXPORT_SYMBOL(qman_volatile_dequeue); + +static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail) +{ + if (avail) + qm_eqcr_cce_prefetch(&p->p); + else + qm_eqcr_cce_update(&p->p); +} + +int qman_eqcr_is_empty(void) +{ + unsigned long irqflags __maybe_unused; + struct qman_portal *p = get_affine_portal(); + u8 avail; + + PORTAL_IRQ_LOCK(p, irqflags); + update_eqcr_ci(p, 0); + avail = qm_eqcr_get_fill(&p->p); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return (avail == 0); +} +EXPORT_SYMBOL(qman_eqcr_is_empty); + +void qman_set_dc_ern(qman_cb_dc_ern handler, int affine) +{ + if (affine) { + unsigned long irqflags __maybe_unused; + struct qman_portal *p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + p->cb_dc_ern = handler; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + } else + cb_dc_ern = handler; +} +EXPORT_SYMBOL(qman_set_dc_ern); + +static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p, + unsigned long *irqflags __maybe_unused, + struct qman_fq *fq, + const struct qm_fd *fd, + u32 flags) +{ + struct qm_eqcr_entry *eq; + u8 avail; + + *p = get_affine_portal(); + PORTAL_IRQ_LOCK(*p, (*irqflags)); +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { + if ((*p)->eqci_owned) { + PORTAL_IRQ_UNLOCK(*p, (*irqflags)); + put_affine_portal(); + return NULL; + } + (*p)->eqci_owned = fq; + } +#endif + avail = qm_eqcr_get_avail(&(*p)->p); + if (avail < 2) + update_eqcr_ci(*p, avail); + eq = qm_eqcr_start(&(*p)->p); + if (unlikely(!eq)) { +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) + (*p)->eqci_owned = NULL; +#endif + PORTAL_IRQ_UNLOCK(*p, (*irqflags)); + put_affine_portal(); + return NULL; + } + if (flags & QMAN_ENQUEUE_FLAG_DCA) + eq->dca = QM_EQCR_DCA_ENABLE | + ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ? 
+ QM_EQCR_DCA_PARK : 0) | + ((flags >> 8) & QM_EQCR_DCA_IDXMASK); + eq->fqid = fq->fqid; +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + eq->tag = fq->key; +#else + eq->tag = (u32)(uintptr_t)fq; +#endif + /* From p4080 rev1 -> rev2, the FD struct's address went from 48-bit to + * 40-bit but rev1 chips will still interpret it as 48-bit, meaning we + * have to scrub the upper 8-bits, just in case the user left noise in + * there. Doing this selectively via a run-time check of the h/w + * revision (as we do for most errata, for example) is too slow in this + * critical path code. The most inexpensive way to handle this is just + * to reinterpret the FD as 4 32-bit words and to mask the first word + * appropriately, irrespecitive of the h/w revision. The struct fields + * corresponding to this word are; + * u8 dd:2; + * u8 liodn_offset:6; + * u8 bpid; + * u8 eliodn_offset:4; + * u8 __reserved:4; + * u8 addr_hi; + * So we mask this word with 0xc0ff00ff, which implicitly scrubs out + * liodn_offset, eliodn_offset, and __reserved - the latter two fields + * are interpreted as the 8 msbits of the 48-bit address in the case of + * rev1. + */ + { + const u32 *src = (const u32 *)fd; + u32 *dest = (u32 *)&eq->fd; + dest[0] = src[0] & 0xc0ff00ff; + dest[1] = src[1]; + dest[2] = src[2]; + dest[3] = src[3]; + } + return eq; +} + +#ifdef CONFIG_FSL_DPA_CAN_WAIT +static noinline struct qm_eqcr_entry *__wait_eq_start(struct qman_portal **p, + unsigned long *irqflags __maybe_unused, + struct qman_fq *fq, + const struct qm_fd *fd, + u32 flags) +{ + struct qm_eqcr_entry *eq = try_eq_start(p, irqflags, fq, fd, flags); + if (!eq) + qm_eqcr_set_ithresh(&(*p)->p, EQCR_ITHRESH); + return eq; +} +static noinline struct qm_eqcr_entry *wait_eq_start(struct qman_portal **p, + unsigned long *irqflags __maybe_unused, + struct qman_fq *fq, + const struct qm_fd *fd, + u32 flags) +{ + struct qm_eqcr_entry *eq; + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) + wait_event_interruptible(affine_queue, + (eq = __wait_eq_start(p, irqflags, fq, fd, flags))); + else + wait_event(affine_queue, + (eq = __wait_eq_start(p, irqflags, fq, fd, flags))); + return eq; +} +#endif + +int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags) +{ + struct qman_portal *p; + struct qm_eqcr_entry *eq; + unsigned long irqflags __maybe_unused; + +#ifdef CONFIG_FSL_DPA_CAN_WAIT + if (flags & QMAN_ENQUEUE_FLAG_WAIT) + eq = wait_eq_start(&p, &irqflags, fq, fd, flags); + else +#endif + eq = try_eq_start(&p, &irqflags, fq, fd, flags); + if (!eq) + return -EBUSY; + /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ + qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE | + (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); + /* Factor the below out, it's used from qman_enqueue_orp() too */ + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) + wait_event_interruptible(affine_queue, + (p->eqci_owned != fq)); + else + wait_event(affine_queue, (p->eqci_owned != fq)); + } +#endif + return 0; +} +EXPORT_SYMBOL(qman_enqueue); + +int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags, + struct qman_fq *orp, u16 orp_seqnum) +{ + struct qman_portal *p; + struct qm_eqcr_entry *eq; + unsigned long irqflags __maybe_unused; + +#ifdef CONFIG_FSL_DPA_CAN_WAIT + if (flags & QMAN_ENQUEUE_FLAG_WAIT) + eq = wait_eq_start(&p, &irqflags, fq, fd, flags); + else 
+#endif + eq = try_eq_start(&p, &irqflags, fq, fd, flags); + if (!eq) + return -EBUSY; + /* Process ORP-specifics here */ + if (flags & QMAN_ENQUEUE_FLAG_NLIS) + orp_seqnum |= QM_EQCR_SEQNUM_NLIS; + else { + orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS; + if (flags & QMAN_ENQUEUE_FLAG_NESN) + orp_seqnum |= QM_EQCR_SEQNUM_NESN; + else + /* No need to check 4 QMAN_ENQUEUE_FLAG_HOLE */ + orp_seqnum &= ~QM_EQCR_SEQNUM_NESN; + } + eq->seqnum = orp_seqnum; + eq->orp = orp->fqid; + /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ + qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP | + ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ? + 0 : QM_EQCR_VERB_CMD_ENQUEUE) | + (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) + wait_event_interruptible(affine_queue, + (p->eqci_owned != fq)); + else + wait_event(affine_queue, (p->eqci_owned != fq)); + } +#endif + return 0; +} +EXPORT_SYMBOL(qman_enqueue_orp); + +int qman_modify_cgr(struct qman_cgr *cgr, u32 flags, + struct qm_mcc_initcgr *opts) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + u8 res; + u8 verb = QM_MCC_VERB_MODIFYCGR; + + /* frame mode not supported on rev1.0 */ + if (unlikely(qman_ip_rev == QMAN_REV10)) { + if (opts && (opts->we_mask & QM_CGR_WE_MODE) && + opts->cgr.mode == QMAN_CGR_MODE_FRAME) { + put_affine_portal(); + return -EIO; + } + } + PORTAL_IRQ_LOCK(p, irqflags); + mcc = qm_mc_start(&p->p); + if (opts) + mcc->initcgr = *opts; + mcc->initcgr.cgid = cgr->cgrid; + if (flags & QMAN_CGR_FLAG_USE_INIT) + verb = QM_MCC_VERB_INITCGR; + qm_mc_commit(&p->p, verb); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb); + res = mcr->result; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return (res == QM_MCR_RESULT_OK) ? 0 : -EIO; +} +EXPORT_SYMBOL(qman_modify_cgr); + +#define TARG_MASK(n) (0x80000000 >> (n->config->public_cfg.channel - \ + QM_CHANNEL_SWPORTAL0)) +#define PORTAL_IDX(n) (n->config->public_cfg.channel - QM_CHANNEL_SWPORTAL0) + +int qman_create_cgr(struct qman_cgr *cgr, u32 flags, + struct qm_mcc_initcgr *opts) +{ + unsigned long irqflags __maybe_unused; + struct qm_mcr_querycgr cgr_state; + struct qm_mcc_initcgr local_opts; + int ret; + struct qman_portal *p; + + /* We have to check that the provided CGRID is within the limits of the + * data-structures, for obvious reasons. However we'll let h/w take + * care of determining whether it's within the limits of what exists on + * the SoC. 
*/ + if (cgr->cgrid >= __CGR_NUM) + return -EINVAL; + + p = get_affine_portal(); + + memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); + cgr->chan = p->config->public_cfg.channel; + spin_lock_irqsave(&p->cgr_lock, irqflags); + + /* if no opts specified and I'm not the first for this portal, just add + * to the list */ + if ((opts == NULL) && !list_empty(&p->cgr_cbs[cgr->cgrid])) + goto add_list; + + ret = qman_query_cgr(cgr, &cgr_state); + if (ret) + goto release_lock; + if (opts) + local_opts = *opts; + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) + local_opts.cgr.cscn_targ_upd_ctrl = + QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p); + else + /* Overwrite TARG */ + local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ | + TARG_MASK(p); + local_opts.we_mask |= QM_CGR_WE_CSCN_TARG; + + /* send init if flags indicate so */ + if (opts && (flags & QMAN_CGR_FLAG_USE_INIT)) + ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts); + else + ret = qman_modify_cgr(cgr, 0, &local_opts); + if (ret) + goto release_lock; +add_list: + list_add(&cgr->node, &p->cgr_cbs[cgr->cgrid]); + + /* Determine if newly added object requires its callback to be called */ + ret = qman_query_cgr(cgr, &cgr_state); + if (ret) { + /* we can't go back, so proceed and return success, but screen + * and wail to the log file */ + pr_crit("CGR HW state partially modified\n"); + ret = 0; + goto release_lock; + } + if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1], + cgr->cgrid)) + cgr->cb(p, cgr, 1); +release_lock: + spin_unlock_irqrestore(&p->cgr_lock, irqflags); + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_create_cgr); + +int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal, + struct qm_mcc_initcgr *opts) +{ + unsigned long irqflags __maybe_unused; + struct qm_mcc_initcgr local_opts; + int ret; + + if ((qman_ip_rev & 0xFF00) < QMAN_REV30) { + pr_warning("This QMan version doesn't support to send CSCN to" + " DCP portal\n"); + return -EINVAL; + } + /* We have to check that the provided CGRID is within the limits of the + * data-structures, for obvious reasons. However we'll let h/w take + * care of determining whether it's within the limits of what exists on + * the SoC. 
+ */
+	if (cgr->cgrid >= __CGR_NUM)
+		return -EINVAL;
+
+	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+	if (opts)
+		local_opts = *opts;
+
+	local_opts.cgr.cscn_targ_upd_ctrl = QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
+				QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
+	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+	/* send init if flags indicate so */
+	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+				&local_opts);
+	else
+		ret = qman_modify_cgr(cgr, 0, &local_opts);
+
+	return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr_to_dcp);
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+	unsigned long irqflags __maybe_unused;
+	struct qm_mcr_querycgr cgr_state;
+	struct qm_mcc_initcgr local_opts;
+	int ret = 0;
+	struct qman_portal *p = get_affine_portal();
+
+	if (cgr->chan != p->config->public_cfg.channel) {
+		pr_crit("Attempting to delete cgr from a different portal "
+			"than it was created on: create 0x%x, delete 0x%x\n",
+			cgr->chan, p->config->public_cfg.channel);
+		ret = -EINVAL;
+		goto put_portal;
+	}
+	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+	spin_lock_irqsave(&p->cgr_lock, irqflags);
+	list_del(&cgr->node);
+	/* If last in list, CSCN_TARG must be set accordingly */
+	if (!list_empty(&p->cgr_cbs[cgr->cgrid]))
+		goto release_lock;
+	ret = qman_query_cgr(cgr, &cgr_state);
+	if (ret) {
+		/* add back to the list */
+		list_add(&cgr->node, &p->cgr_cbs[cgr->cgrid]);
+		goto release_lock;
+	}
+	/* Overwrite TARG: leave the "write" bit clear so that this portal is
+	 * removed from the CGR's CSCN target list. */
+	local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
+	else
+		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
+						~(TARG_MASK(p));
+	ret = qman_modify_cgr(cgr, 0, &local_opts);
+	if (ret)
+		/* add back to the list */
+		list_add(&cgr->node, &p->cgr_cbs[cgr->cgrid]);
+release_lock:
+	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+put_portal:
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_delete_cgr);
+
+int qm_get_clock(u64 *clock_hz)
+{
+	if (!qman_clk) {
+		pr_warning("Qman clock speed is unknown\n");
+		return -EINVAL;
+	}
+	*clock_hz = (u64)qman_clk;
+	return 0;
+}
+EXPORT_SYMBOL(qm_get_clock);
+
+int qm_set_clock(u64 clock_hz)
+{
+	if (qman_clk)
+		return -1;
+	qman_clk = (u32)clock_hz;
+	return 0;
+}
+EXPORT_SYMBOL(qm_set_clock);
+
+/* CEETM management command */
+int qman_ceetm_configure_lfqmt(struct qm_mcc_ceetm_lfqmt_config *opts)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p;
+	unsigned long irqflags __maybe_unused;
+	u8 res;
+
+	p = get_affine_portal();
+	PORTAL_IRQ_LOCK(p, irqflags);
+
+	mcc = qm_mc_start(&p->p);
+	mcc->lfqmt_config = *opts;
+	qm_mc_commit(&p->p, QM_CEETM_VERB_LFQMT_CONFIG);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+		   QM_CEETM_VERB_LFQMT_CONFIG);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+
+	res = mcr->result;
+	if (res != QM_MCR_RESULT_OK) {
+		pr_err("CEETM: CONFIGURE LFQMT failed\n");
+		return -EIO;
+	}
+	return 0;
+}
+
+int qman_ceetm_query_lfqmt(int lfqid,
+			   struct qm_mcr_ceetm_lfqmt_query *lfqmt_query)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p;
+	unsigned long irqflags __maybe_unused;
+	u8 res;
+
+	p = get_affine_portal();
+	PORTAL_IRQ_LOCK(p, irqflags);
+
+	mcc = qm_mc_start(&p->p);
+	mcc->lfqmt_query.lfqid = lfqid;
+	qm_mc_commit(&p->p, QM_CEETM_VERB_LFQMT_QUERY);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_LFQMT_QUERY); + res = mcr->result; + if (res == QM_MCR_RESULT_OK) + *lfqmt_query = mcr->lfqmt_query; + + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + if (res != QM_MCR_RESULT_OK) { + pr_err("CEETM: QUERY LFQMT failed\n"); + return -EIO; + } + return 0; +} + +int qman_ceetm_configure_cq(struct qm_mcc_ceetm_cq_config *opts) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + u8 res; + + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + + mcc = qm_mc_start(&p->p); + mcc->cq_config = *opts; + qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_CONFIG); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + res = mcr->result; + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CQ_CONFIG); + + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + + if (res != QM_MCR_RESULT_OK) { + pr_err("CEETM: CONFIGURE CQ failed\n"); + return -EIO; + } + return 0; +} + +int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid, + struct qm_mcr_ceetm_cq_query *cq_query) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + u8 res; + + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + + mcc = qm_mc_start(&p->p); + mcc->cq_query.cqid = cqid; + mcc->cq_query.dcpid = dcpid; + qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_QUERY); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + res = mcr->result; + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CQ_QUERY); + + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + + if (res != QM_MCR_RESULT_OK) { + pr_err("CEETM: QUERY CQ failed\n"); + return -EIO; + } + + *cq_query = mcr->cq_query; + return 0; +} + +int qman_ceetm_configure_dct(struct qm_mcc_ceetm_dct_config *opts) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + u8 res; + + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + + mcc = qm_mc_start(&p->p); + mcc->dct_config = *opts; + qm_mc_commit(&p->p, QM_CEETM_VERB_DCT_CONFIG); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_DCT_CONFIG); + res = mcr->result; + + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + + if (res != QM_MCR_RESULT_OK) { + pr_err("CEETM: CONFIGURE DCT failed\n"); + return -EIO; + } + return 0; +} + +int qman_ceetm_query_dct(struct qm_mcc_ceetm_dct_query *opts, + struct qm_mcr_ceetm_dct_query *dct_query) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + u8 res; + + PORTAL_IRQ_LOCK(p, irqflags); + + mcc = qm_mc_start(&p->p); + mcc->dct_query = *opts; + qm_mc_commit(&p->p, QM_CEETM_VERB_DCT_QUERY); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_DCT_QUERY); + res = mcr->result; + + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + + if (res != QM_MCR_RESULT_OK) { + pr_err("CEETM: QUERY DCT failed\n"); + return -EIO; + } + + *dct_query = mcr->dct_query; + return 0; +} + +int qman_ceetm_configure_class_scheduler( + struct qm_mcc_ceetm_class_scheduler_config *opts) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + u8 res; + + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + + mcc = 
qm_mc_start(&p->p); + mcc->csch_config = *opts; + qm_mc_commit(&p->p, QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == + QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG); + res = mcr->result; + + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + + if (res != QM_MCR_RESULT_OK) { + pr_err("CEETM: CONFIGURE CLASS SCHEDULER failed\n"); + return -EIO; + } + return 0; +} + +int qman_ceetm_query_class_scheduler(struct qm_ceetm_channel *channel, + struct qm_mcr_ceetm_class_scheduler_query *query) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + u8 res; + + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + + mcc = qm_mc_start(&p->p); + mcc->csch_query.cqcid = channel->idx; + mcc->csch_query.dcpid = channel->dcp_idx; + qm_mc_commit(&p->p, QM_CEETM_VERB_CLASS_SCHEDULER_QUERY); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == + QM_CEETM_VERB_CLASS_SCHEDULER_QUERY); + res = mcr->result; + + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + + if (res != QM_MCR_RESULT_OK) { + pr_err("CEETM: QUERY CLASS SCHEDULER failed\n"); + return -EIO; + } + *query = mcr->csch_query; + return 0; +} + +int qman_ceetm_configure_mapping_shaper_tcfc( + struct qm_mcc_ceetm_mapping_shaper_tcfc_config *opts) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + u8 res; + + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + + mcc = qm_mc_start(&p->p); + mcc->mst_config = *opts; + qm_mc_commit(&p->p, QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == + QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG); + res = mcr->result; + + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + + if (res != QM_MCR_RESULT_OK) { + pr_err("CEETM: CONFIGURE CHANNEL MAPPING failed\n"); + return -EIO; + } + return 0; +} + +int qman_ceetm_query_mapping_shaper_tcfc( + struct qm_mcc_ceetm_mapping_shaper_tcfc_query *opts, + struct qm_mcr_ceetm_mapping_shaper_tcfc_query *response) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + u8 res; + + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + + mcc = qm_mc_start(&p->p); + mcc->mst_query = *opts; + qm_mc_commit(&p->p, QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == + QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY); + res = mcr->result; + + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + + if (res != QM_MCR_RESULT_OK) { + pr_err("CEETM: QUERY CHANNEL MAPPING failed\n"); + return -EIO; + } + + *response = mcr->mst_query; + return 0; +} + +int qman_ceetm_configure_ccgr(struct qm_mcc_ceetm_ccgr_config *opts) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + u8 res; + + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + + mcc = qm_mc_start(&p->p); + mcc->ccgr_config = *opts; + + qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_CONFIG); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CCGR_CONFIG); + + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + + res = mcr->result; + if (res != 
QM_MCR_RESULT_OK) { + pr_err("CEETM: CONFIGURE CCGR failed\n"); + return -EIO; + } + return 0; +} + +int qman_ceetm_query_ccgr(struct qm_mcc_ceetm_ccgr_query *ccgr_query, + struct qm_mcr_ceetm_ccgr_query *response) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + u8 res; + + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + + mcc = qm_mc_start(&p->p); + mcc->ccgr_query = *ccgr_query; + qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY); + + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CCGR_QUERY); + + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + + res = mcr->result; + if (res != QM_MCR_RESULT_OK) { + pr_err("CEETM: QUERY CCGR failed\n"); + return -EIO; + } + *response = mcr->ccgr_query; + return 0; +} + +int qman_ceetm_cq_peek_pop_xsfdrread(struct qm_ceetm_cq *cq, + u8 command_type, u16 xsfdr, + struct qm_mcr_ceetm_cq_peek_pop_xsfdrread *cq_ppxr) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + u8 res; + + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + + mcc = qm_mc_start(&p->p); + switch (command_type) { + case 0: + case 1: + mcc->cq_ppxr.cqid = (cq->parent->idx << 4) | cq->idx; + break; + case 2: + mcc->cq_ppxr.xsfdr = xsfdr; + break; + default: + break; + } + mcc->cq_ppxr.ct = command_type; + mcc->cq_ppxr.dcpid = cq->parent->dcp_idx; + qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == + QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD); + + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + + res = mcr->result; + if (res != QM_MCR_RESULT_OK) { + pr_err("CEETM: CQ PEEK/POP/XSFDR READ failed\n"); + return -EIO; + } + *cq_ppxr = mcr->cq_ppxr; + return 0; +} + +int qman_ceetm_query_statistics(u16 cid, + enum qm_dc_portal dcp_idx, + u16 command_type, + struct qm_mcr_ceetm_statistics_query *query_result) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + u8 res; + + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + + mcc = qm_mc_start(&p->p); + mcc->stats_query_write.cid = cid; + mcc->stats_query_write.dcpid = dcp_idx; + mcc->stats_query_write.ct = command_type; + qm_mc_commit(&p->p, QM_CEETM_VERB_STATISTICS_QUERY_WRITE); + + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == + QM_CEETM_VERB_STATISTICS_QUERY_WRITE); + + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + + res = mcr->result; + if (res != QM_MCR_RESULT_OK) { + pr_err("CEETM: STATISTICS QUERY failed\n"); + return -EIO; + } + *query_result = mcr->stats_query; + return 0; +} + +int qman_ceetm_write_statistics(u16 cid, enum qm_dc_portal dcp_idx, + u16 command_type, u64 frame_count, u64 byte_count) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + u8 res; + + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + + mcc = qm_mc_start(&p->p); + mcc->stats_query_write.cid = cid; + mcc->stats_query_write.dcpid = dcp_idx; + mcc->stats_query_write.ct = command_type; + mcc->stats_query_write.frm_cnt = frame_count; + mcc->stats_query_write.byte_cnt = byte_count; + qm_mc_commit(&p->p, QM_CEETM_VERB_STATISTICS_QUERY_WRITE); + + while (!(mcr = qm_mc_result(&p->p))) + 
cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+		   QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
+
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+
+	res = mcr->result;
+	if (res != QM_MCR_RESULT_OK) {
+		pr_err("CEETM: STATISTICS WRITE failed\n");
+		return -EIO;
+	}
+	return 0;
+}
+
+int qman_ceetm_bps2tokenrate(u32 bps, struct qm_ceetm_rate *token_rate,
+			     int rounding)
+{
+	u16 pres;
+	u64 temp;
+	u64 qman_freq;
+	int ret;
+
+	/* Read PRES from CEET_CFG_PRES register */
+	ret = qman_ceetm_get_prescaler(&pres);
+	if (ret)
+		return -EINVAL;
+
+	ret = qm_get_clock(&qman_freq);
+	if (ret)
+		return -EINVAL;
+
+	/* token-rate = bytes-per-second * update-reference-period
+	 *
+	 * Where token-rate is N/8192 for an integer N, and
+	 * update-reference-period is (2^22)/(PRES*QHz), where PRES
+	 * is the prescaler value and QHz is the QMan clock frequency.
+	 * So:
+	 *
+	 * token-rate = (bytes-per-second * 2^22) / (PRES * QHz)
+	 *
+	 * Converting to bits-per-second gives:
+	 *
+	 * token-rate = (bps * 2^19) / (PRES * QHz)
+	 * N = (bps * 2^32) / (PRES * QHz)
+	 *
+	 */
+	temp = ROUNDING(((u64)bps << 32), pres, rounding);
+	temp = ROUNDING(temp, qman_freq, rounding);
+	token_rate->whole = temp >> 13;
+	token_rate->fraction = temp & (((u64)1 << 13) - 1);
+	return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_bps2tokenrate);
+
+int qman_ceetm_tokenrate2bps(const struct qm_ceetm_rate *token_rate, u32 *bps,
+			     int rounding)
+{
+	u16 pres;
+	u64 temp;
+	u64 qman_freq;
+	int ret;
+
+	/* Read PRES from CEET_CFG_PRES register */
+	ret = qman_ceetm_get_prescaler(&pres);
+	if (ret)
+		return -EINVAL;
+
+	ret = qm_get_clock(&qman_freq);
+	if (ret)
+		return -EINVAL;
+
+	/* bytes-per-second = token-rate / update-reference-period
+	 *
+	 * where "token-rate" is N/8192 for an integer N, and
+	 * "update-reference-period" is (2^22)/(PRES*QHz), where PRES is
+	 * the prescaler value and QHz is the QMan clock frequency. So:
+	 *
+	 * bytes-per-second = (N/8192) / (4194304/(PRES*QHz))
+	 *		    = N*PRES*QHz / (4194304*8192)
+	 *		    = N*PRES*QHz / (2^35)
+	 *
+	 * Converting to bits-per-second gives:
+	 *
+	 * bps = N*PRES*QHz / (2^32)
+	 *
+	 * Note, the numerator has a maximum width of 72 bits! So to
+	 * avoid 64-bit overflow errors, we calculate PRES*QHz (maximum
+	 * width 48 bits) divided by 2^16 (reducing it to a maximum of 32
+	 * bits), before multiplying by N (which takes it to a maximum of
+	 * 56 bits).
+ * + * temp = PRES*QHZ / (2^16) + * kbps = temp*N / (2^16) + */ + temp = ROUNDING(qman_freq * pres, (u64)1 << 16 , rounding); + temp *= ((token_rate->whole << 13) + token_rate->fraction); + *bps = ROUNDING(temp, (u64)(1) << 16, rounding); + return 0; +} +EXPORT_SYMBOL(qman_ceetm_tokenrate2bps); + +int qman_ceetm_sp_claim(struct qm_ceetm_sp **sp, enum qm_dc_portal dcp_idx, + unsigned int sp_idx) +{ + struct qm_ceetm_sp *p; + + DPA_ASSERT((dcp_idx == qm_dc_portal_fman0) || + (dcp_idx == qm_dc_portal_fman1)); + + if ((sp_idx < qman_ceetms[dcp_idx].sp_range[0]) || + (sp_idx > (qman_ceetms[dcp_idx].sp_range[0] + + qman_ceetms[dcp_idx].sp_range[1]))) { + pr_err("Sub-portal index doesn't exist\n"); + return -EINVAL; + } + + list_for_each_entry(p, &qman_ceetms[dcp_idx].sub_portals, node) { + if ((p->idx == sp_idx) && (p->is_claimed == 0)) { + p->is_claimed = 1; + *sp = p; + return 0; + } + } + pr_err("The sub-portal#%d is not available!\n", sp_idx); + return -ENODEV; +} +EXPORT_SYMBOL(qman_ceetm_sp_claim); + +int qman_ceetm_sp_release(struct qm_ceetm_sp *sp) +{ + struct qm_ceetm_sp *p; + + if (sp->lni->is_claimed == 1) { + pr_err("The dependency of sub-portal has not been released!\n"); + return -EBUSY; + } + + list_for_each_entry(p, &qman_ceetms[sp->dcp_idx].sub_portals, node) { + if (p->idx == sp->idx) { + p->is_claimed = 0; + p->lni = NULL; + } + } + /* Disable CEETM mode of this sub-portal */ + qman_sp_disable_ceetm_mode(sp->idx, sp->dcp_idx); + + return 0; +} +EXPORT_SYMBOL(qman_ceetm_sp_release); + +int qman_ceetm_lni_claim(struct qm_ceetm_lni **lni, enum qm_dc_portal dcp_idx, + unsigned int lni_idx) +{ + struct qm_ceetm_lni *p; + + if ((lni_idx < qman_ceetms[dcp_idx].lni_range[0]) || + (lni_idx > (qman_ceetms[dcp_idx].lni_range[0] + + qman_ceetms[dcp_idx].lni_range[1]))) { + pr_err("The lni index is out of range\n"); + return -EINVAL; + } + + list_for_each_entry(p, &qman_ceetms[dcp_idx].lnis, node) { + if ((p->idx == lni_idx) && (p->is_claimed == 0)) { + *lni = p; + p->is_claimed = 1; + return 0; + } + } + + pr_err("The LNI#%d is not available!\n", lni_idx); + return -EINVAL; +} +EXPORT_SYMBOL(qman_ceetm_lni_claim); + +int qman_ceetm_lni_release(struct qm_ceetm_lni *lni) +{ + struct qm_ceetm_lni *p; + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; + + if (!list_empty(&lni->channels)) { + pr_err("The LNI dependencies are not released!\n"); + return -EBUSY; + } + + list_for_each_entry(p, &qman_ceetms[lni->dcp_idx].lnis, node) { + if (p->idx == lni->idx) { + p->shaper_enable = 0; + p->shaper_couple = 0; + p->cr_token_rate.whole = 0; + p->cr_token_rate.fraction = 0; + p->er_token_rate.whole = 0; + p->er_token_rate.fraction = 0; + p->cr_token_bucket_limit = 0; + p->er_token_bucket_limit = 0; + p->is_claimed = 0; + } + } + config_opts.cid = CEETM_COMMAND_LNI_SHAPER | lni->idx; + config_opts.dcpid = lni->dcp_idx; + memset(&config_opts.shaper_config, 0, + sizeof(config_opts.shaper_config)); + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); +} +EXPORT_SYMBOL(qman_ceetm_lni_release); + +int qman_ceetm_sp_set_lni(struct qm_ceetm_sp *sp, struct qm_ceetm_lni *lni) +{ + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; + + config_opts.cid = CEETM_COMMAND_SP_MAPPING | sp->idx; + config_opts.dcpid = sp->dcp_idx; + config_opts.sp_mapping.map_lni_id = lni->idx; + sp->lni = lni; + + if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) + return -EINVAL; + + /* Enable CEETM mode for this sub-portal */ + return qman_sp_enable_ceetm_mode(sp->dcp_idx, sp->idx); +} 
+EXPORT_SYMBOL(qman_ceetm_sp_set_lni); + +int qman_ceetm_sp_get_lni(struct qm_ceetm_sp *sp, unsigned int *lni_idx) +{ + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; + + query_opts.cid = CEETM_COMMAND_SP_MAPPING | sp->idx; + query_opts.dcpid = sp->dcp_idx; + if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) { + pr_err("Can't get SP <-> LNI mapping\n"); + return -EINVAL; + } + *lni_idx = query_result.sp_mapping_query.map_lni_id; + sp->lni->idx = query_result.sp_mapping_query.map_lni_id; + return 0; +} +EXPORT_SYMBOL(qman_ceetm_sp_get_lni); + +int qman_ceetm_lni_enable_shaper(struct qm_ceetm_lni *lni, int coupled, + int oal) +{ + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; + + if (lni->shaper_enable) { + pr_err("The shaper has already been enabled\n"); + return -EINVAL; + } + + lni->shaper_enable = 1; + lni->shaper_couple = coupled; + + config_opts.cid = CEETM_COMMAND_LNI_SHAPER | lni->idx; + config_opts.dcpid = lni->dcp_idx; + config_opts.shaper_config.cpl = (coupled << 7) | oal; + config_opts.shaper_config.crtcr = (lni->cr_token_rate.whole << 13) | + lni->cr_token_rate.fraction; + config_opts.shaper_config.ertcr = (lni->er_token_rate.whole << 13) | + lni->er_token_rate.fraction; + config_opts.shaper_config.crtbl = lni->cr_token_bucket_limit; + config_opts.shaper_config.ertbl = lni->er_token_bucket_limit; + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); +} +EXPORT_SYMBOL(qman_ceetm_lni_enable_shaper); + +int qman_ceetm_lni_disable_shaper(struct qm_ceetm_lni *lni) +{ + if (!lni->shaper_enable) { + pr_err("The shaper has been disabled\n"); + return -EINVAL; + } + + lni->shaper_enable = 0; + return 0; +} +EXPORT_SYMBOL(qman_ceetm_lni_disable_shaper); + +int qman_ceetm_lni_set_commit_rate(struct qm_ceetm_lni *lni, + const struct qm_ceetm_rate *token_rate, + u16 token_limit) +{ + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; + int ret; + + if (!lni->shaper_enable) { + pr_err("The LNI#%d is unshaped, cannot set CR rate\n", + lni->idx); + return -EINVAL; + } + + query_opts.cid = CEETM_COMMAND_LNI_SHAPER | lni->idx; + query_opts.dcpid = lni->dcp_idx; + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); + if (ret) { + pr_err("Fail to get current LNI shaper setting\n"); + return -EINVAL; + } + + lni->cr_token_rate.whole = token_rate->whole; + lni->cr_token_rate.fraction = token_rate->fraction; + lni->cr_token_bucket_limit = token_limit; + config_opts.cid = CEETM_COMMAND_LNI_SHAPER | lni->idx; + config_opts.dcpid = lni->dcp_idx; + config_opts.shaper_config.crtcr = (token_rate->whole << 13) | + (token_rate->fraction); + config_opts.shaper_config.crtbl = token_limit; + config_opts.shaper_config.cpl = query_result.shaper_query.cpl; + config_opts.shaper_config.ertcr = query_result.shaper_query.ertcr; + config_opts.shaper_config.ertbl = query_result.shaper_query.ertbl; + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); +} +EXPORT_SYMBOL(qman_ceetm_lni_set_commit_rate); + +int qman_ceetm_lni_get_commit_rate(struct qm_ceetm_lni *lni, + struct qm_ceetm_rate *token_rate, + u16 *token_limit) +{ + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; + int ret; + + query_opts.cid = CEETM_COMMAND_LNI_SHAPER | lni->idx; + query_opts.dcpid = lni->dcp_idx; + + 
ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
+	if (ret || !query_result.shaper_query.crtcr ||
+	    !query_result.shaper_query.crtbl) {
+		pr_err("The LNI CR rate or limit is not set\n");
+		return -EINVAL;
+	}
+	token_rate->whole = query_result.shaper_query.crtcr >> 13;
+	token_rate->fraction = query_result.shaper_query.crtcr & 0x1FFF;
+	*token_limit = query_result.shaper_query.crtbl;
+	return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_lni_get_commit_rate);
+
+int qman_ceetm_lni_set_excess_rate(struct qm_ceetm_lni *lni,
+				   const struct qm_ceetm_rate *token_rate,
+				   u16 token_limit)
+{
+	struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
+	struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
+	struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
+	int ret;
+
+	if (!lni->shaper_enable) {
+		pr_err("The LNI#%d is unshaped, cannot set ER rate\n",
+			lni->idx);
+		return -EINVAL;
+	}
+
+	query_opts.cid = CEETM_COMMAND_LNI_SHAPER | lni->idx;
+	query_opts.dcpid = lni->dcp_idx;
+	ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
+	if (ret) {
+		pr_err("Fail to get current LNI shaper setting\n");
+		return -EINVAL;
+	}
+
+	lni->er_token_rate.whole = token_rate->whole;
+	lni->er_token_rate.fraction = token_rate->fraction;
+	lni->er_token_bucket_limit = token_limit;
+	config_opts.cid = CEETM_COMMAND_LNI_SHAPER | lni->idx;
+	config_opts.dcpid = lni->dcp_idx;
+	config_opts.shaper_config.ertcr =
+		(token_rate->whole << 13) | (token_rate->fraction);
+	config_opts.shaper_config.ertbl = token_limit;
+	config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
+	config_opts.shaper_config.crtcr = query_result.shaper_query.crtcr;
+	config_opts.shaper_config.crtbl = query_result.shaper_query.crtbl;
+
+	return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
+}
+EXPORT_SYMBOL(qman_ceetm_lni_set_excess_rate);
+
+int qman_ceetm_lni_get_excess_rate(struct qm_ceetm_lni *lni,
+				   struct qm_ceetm_rate *token_rate,
+				   u16 *token_limit)
+{
+	struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
+	struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
+	int ret;
+
+	query_opts.cid = CEETM_COMMAND_LNI_SHAPER | lni->idx;
+	query_opts.dcpid = lni->dcp_idx;
+	ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
+	if (ret || !query_result.shaper_query.ertcr ||
+	    !query_result.shaper_query.ertbl) {
+		pr_err("The LNI ER rate or limit is not set\n");
+		return -EINVAL;
+	}
+	token_rate->whole = query_result.shaper_query.ertcr >> 13;
+	token_rate->fraction = query_result.shaper_query.ertcr & 0x1FFF;
+	*token_limit = query_result.shaper_query.ertbl;
+	return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_lni_get_excess_rate);
+
+#define QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(n) ((15 - n) * 4)
+#define QMAN_CEETM_LNITCFCC_ENABLE 0x8
+int qman_ceetm_lni_set_tcfcc(struct qm_ceetm_lni *lni,
+			     unsigned int cq_level,
+			     int traffic_class)
+{
+	struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
+	struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
+	struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
+	u64 lnitcfcc;
+
+	if ((cq_level > 15) || (traffic_class > 7)) {
+		pr_err("The CQ or traffic class id is out of range\n");
+		return -EINVAL;
+	}
+
+	query_opts.cid = CEETM_COMMAND_TCFC | lni->idx;
+	query_opts.dcpid = lni->dcp_idx;
+	if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
+		pr_err("Fail to query tcfcc\n");
+		return -EINVAL;
+	}
+
+	lnitcfcc = query_result.tcfc_query.lnitcfcc;
+	if (traffic_class == -1) {
+		/* disable tcfc for this CQ */
+		lnitcfcc &= ~((u64)QMAN_CEETM_LNITCFCC_ENABLE <<
+			QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level));
+	} else {
+		lnitcfcc &= ~((u64)0xF <<
+			QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level));
+		lnitcfcc |= ((u64)(QMAN_CEETM_LNITCFCC_ENABLE |
+			traffic_class)) <<
+			QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level);
+	}
+	config_opts.tcfc_config.lnitcfcc = lnitcfcc;
+	config_opts.cid = CEETM_COMMAND_TCFC | lni->idx;
+	config_opts.dcpid = lni->dcp_idx;
+	return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
+}
+EXPORT_SYMBOL(qman_ceetm_lni_set_tcfcc);
+
+#define QMAN_CEETM_LNITCFCC_TC_MASK 0x7
+int qman_ceetm_lni_get_tcfcc(struct qm_ceetm_lni *lni, unsigned int cq_level,
+			     int *traffic_class)
+{
+	struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
+	struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
+	int ret;
+	u8 lnitcfcc;
+
+	if (cq_level > 15) {
+		pr_err("The CQ level is out of range\n");
+		return -EINVAL;
+	}
+
+	query_opts.cid = CEETM_COMMAND_TCFC | lni->idx;
+	query_opts.dcpid = lni->dcp_idx;
+	ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
+	if (ret)
+		return ret;
+	lnitcfcc = (u8)(query_result.tcfc_query.lnitcfcc >>
+		QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level));
+	if (lnitcfcc & QMAN_CEETM_LNITCFCC_ENABLE)
+		*traffic_class = lnitcfcc & QMAN_CEETM_LNITCFCC_TC_MASK;
+	else
+		*traffic_class = -1;
+	return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_lni_get_tcfcc);
+
+#define QMAN_CEETM_ENABLE_CHANNEL_SHAPER 0x80
+int qman_ceetm_channel_claim(struct qm_ceetm_channel **channel,
+			     struct qm_ceetm_lni *lni)
+{
+	struct qm_ceetm_channel *p;
+	u32 channel_idx;
+	int ret = 0;
+	struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
+	static u8 map;
+
+	if (lni->dcp_idx == qm_dc_portal_fman0)
+		ret = qman_alloc_ceetm0_channel(&channel_idx);
+	if (lni->dcp_idx == qm_dc_portal_fman1)
+		ret = qman_alloc_ceetm1_channel(&channel_idx);
+	if (ret) {
+		pr_err("There is no channel available for LNI#%d\n", lni->idx);
+		return -ENODEV;
+	}
+
+	p = kzalloc(sizeof(*p), GFP_KERNEL);
+	if (!p) {
+		if (lni->dcp_idx == qm_dc_portal_fman0)
+			qman_release_ceetm0_channelid(channel_idx);
+		if (lni->dcp_idx == qm_dc_portal_fman1)
+			qman_release_ceetm1_channelid(channel_idx);
+		return -ENOMEM;
+	}
+	p->idx = channel_idx;
+	p->dcp_idx = lni->dcp_idx;
+	list_add_tail(&p->node, &lni->channels);
+	INIT_LIST_HEAD(&p->class_queues);
+	INIT_LIST_HEAD(&p->ccgs);
+	config_opts.cid = CEETM_COMMAND_CHANNEL_MAPPING | channel_idx;
+	config_opts.dcpid = lni->dcp_idx;
+	map = (u8)~QMAN_CEETM_ENABLE_CHANNEL_SHAPER;
+	map &= lni->idx;
+	config_opts.channel_mapping.map = map;
+	if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) {
+		pr_err("Can't map channel#%d for LNI#%d\n",
+			channel_idx, lni->idx);
+		return -EINVAL;
+	}
+	*channel = p;
+	return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_channel_claim);
+
+int qman_ceetm_channel_release(struct qm_ceetm_channel *channel)
+{
+	struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
+	if (!list_empty(&channel->class_queues)) {
+		pr_err("CEETM channel#%d has unreleased class queues!\n",
+			channel->idx);
+		return -EBUSY;
+	}
+	if (!list_empty(&channel->ccgs)) {
+		pr_err("CEETM channel#%d has unreleased CCGs!\n",
+			channel->idx);
+		return -EBUSY;
+	}
+
+	config_opts.cid = CEETM_COMMAND_CHANNEL_SHAPER | channel->idx;
+	config_opts.dcpid = channel->dcp_idx;
+	memset(&config_opts.shaper_config, 0,
+		sizeof(config_opts.shaper_config));
+	if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) {
+		pr_err("Can't reset channel shaping parameters\n");
+		return -EINVAL;
+	}
+
+	if (channel->dcp_idx == qm_dc_portal_fman0)
+		qman_release_ceetm0_channelid(channel->idx);
+	if (channel->dcp_idx == qm_dc_portal_fman1)
+
qman_release_ceetm1_channelid(channel->idx); + list_del(&channel->node); + kfree(channel); + + return 0; +} +EXPORT_SYMBOL(qman_ceetm_channel_release); + +int qman_ceetm_channel_enable_shaper(struct qm_ceetm_channel *channel, + int coupled) +{ + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; + u8 map; + + if (channel->shaper_enable == 1) { + pr_err("This channel shaper has been enabled!\n"); + return -EINVAL; + } + + channel->shaper_enable = 1; + channel->shaper_couple = coupled; + + query_opts.cid = (u16)(CEETM_COMMAND_CHANNEL_MAPPING | channel->idx); + query_opts.dcpid = (u8)channel->dcp_idx; + + if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) { + pr_err("Can't query channel mapping\n"); + return -EINVAL; + } + + map = query_result.channel_mapping_query.map; + map |= QMAN_CEETM_ENABLE_CHANNEL_SHAPER; + + config_opts.cid = CEETM_COMMAND_CHANNEL_MAPPING | channel->idx; + config_opts.dcpid = channel->dcp_idx; + config_opts.channel_mapping.map = map; + if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) { + pr_err("Can't enable shaper for channel #%d\n", + channel->idx); + return -EINVAL; + } + + config_opts.cid = CEETM_COMMAND_CHANNEL_SHAPER | channel->idx; + config_opts.shaper_config.cpl = coupled << 7; + config_opts.shaper_config.crtcr = (channel->cr_token_rate.whole << 13) | + channel->cr_token_rate.fraction; + config_opts.shaper_config.ertcr = (channel->er_token_rate.whole << 13) | + channel->er_token_rate.fraction; + config_opts.shaper_config.crtbl = channel->cr_token_bucket_limit; + config_opts.shaper_config.ertbl = channel->er_token_bucket_limit; + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); +} +EXPORT_SYMBOL(qman_ceetm_channel_enable_shaper); + +int qman_ceetm_channel_disable_shaper(struct qm_ceetm_channel *channel) +{ + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; + u8 map; + + if (channel->shaper_enable == 0) { + pr_err("This channel shaper has been disabled\n"); + return -EINVAL; + } + + query_opts.cid = CEETM_COMMAND_CHANNEL_MAPPING | channel->idx; + query_opts.dcpid = channel->dcp_idx; + + if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) { + pr_err("Can't query channel mapping\n"); + return -EINVAL; + } + + map = query_result.channel_mapping_query.map; + map &= ~QMAN_CEETM_ENABLE_CHANNEL_SHAPER; + + config_opts.cid = CEETM_COMMAND_CHANNEL_MAPPING | channel->idx; + config_opts.dcpid = channel->dcp_idx; + config_opts.channel_mapping.map = map; + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); +} +EXPORT_SYMBOL(qman_ceetm_channel_disable_shaper); + +int qman_ceetm_channel_set_commit_rate(struct qm_ceetm_channel *channel, + const struct qm_ceetm_rate *token_rate, + u16 token_limit) +{ + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; + int ret; + + if (!channel->shaper_enable) { + pr_err("This channel is unshaped\n"); + return -EINVAL; + } + + query_opts.cid = CEETM_COMMAND_CHANNEL_SHAPER | channel->idx; + query_opts.dcpid = channel->dcp_idx; + + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); + if (ret) { + pr_err("Fail to get the current channel shaper setting\n"); + return 
-EINVAL; + } + + channel->cr_token_rate.whole = token_rate->whole; + channel->cr_token_rate.fraction = token_rate->fraction; + channel->cr_token_bucket_limit = token_limit; + config_opts.cid = CEETM_COMMAND_CHANNEL_SHAPER | channel->idx; + config_opts.dcpid = channel->dcp_idx; + config_opts.shaper_config.crtcr = (token_rate->whole << 13) | + (token_rate->fraction); + config_opts.shaper_config.crtbl = token_limit; + config_opts.shaper_config.cpl = query_result.shaper_query.cpl; + config_opts.shaper_config.ertcr = query_result.shaper_query.ertcr; + config_opts.shaper_config.ertbl = query_result.shaper_query.ertbl; + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); +} +EXPORT_SYMBOL(qman_ceetm_channel_set_commit_rate); + +int qman_ceetm_channel_get_commit_rate(struct qm_ceetm_channel *channel, + struct qm_ceetm_rate *token_rate, + u16 *token_limit) +{ + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; + int ret; + + query_opts.cid = CEETM_COMMAND_CHANNEL_SHAPER | channel->idx; + query_opts.dcpid = channel->dcp_idx; + + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); + if (ret | !query_result.shaper_query.crtcr | + !query_result.shaper_query.crtbl) { + pr_err("The channel commit rate or limit is not set\n"); + return -EINVAL; + } + token_rate->whole = query_result.shaper_query.crtcr >> 13; + token_rate->fraction = query_result.shaper_query.crtcr & 0x1FFF; + *token_limit = query_result.shaper_query.crtbl; + return 0; +} +EXPORT_SYMBOL(qman_ceetm_channel_get_commit_rate); + +int qman_ceetm_channel_set_excess_rate(struct qm_ceetm_channel *channel, + const struct qm_ceetm_rate *token_rate, + u16 token_limit) +{ + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; + int ret; + + if (!channel->shaper_enable) { + pr_err("This channel is unshaped\n"); + return -EINVAL; + } + + query_opts.cid = CEETM_COMMAND_CHANNEL_SHAPER | channel->idx; + query_opts.dcpid = channel->dcp_idx; + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); + if (ret) { + pr_err("Fail to get the current channel shaper setting\n"); + return -EINVAL; + } + + channel->er_token_rate.whole = token_rate->whole; + channel->er_token_rate.fraction = token_rate->fraction; + channel->er_token_bucket_limit = token_limit; + config_opts.cid = CEETM_COMMAND_CHANNEL_SHAPER | channel->idx; + config_opts.dcpid = channel->dcp_idx; + config_opts.shaper_config.ertcr = + (token_rate->whole << 13) | (token_rate->fraction); + config_opts.shaper_config.ertbl = token_limit; + config_opts.shaper_config.cpl = query_result.shaper_query.cpl; + config_opts.shaper_config.crtcr = query_result.shaper_query.crtcr; + config_opts.shaper_config.crtbl = query_result.shaper_query.crtbl; + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); +} +EXPORT_SYMBOL(qman_ceetm_channel_set_excess_rate); + +int qman_ceetm_channel_get_excess_rate(struct qm_ceetm_channel *channel, + struct qm_ceetm_rate *token_rate, + u16 *token_limit) +{ + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; + int ret; + + query_opts.cid = CEETM_COMMAND_CHANNEL_SHAPER | channel->idx; + query_opts.dcpid = channel->dcp_idx; + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); + if (ret | !query_result.shaper_query.ertcr | + 
!query_result.shaper_query.ertbl) { + pr_err("The channel excess rate or limit is not set\n"); + return -EINVAL; + } + token_rate->whole = query_result.shaper_query.ertcr >> 13; + token_rate->fraction = query_result.shaper_query.ertcr & 0x1FFF; + *token_limit = query_result.shaper_query.ertbl; + return 0; +} +EXPORT_SYMBOL(qman_ceetm_channel_get_excess_rate); + +int qman_ceetm_channel_set_weight(struct qm_ceetm_channel *channel, + u16 token_limit) +{ + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts; + + if (channel->shaper_enable) { + pr_err("This channel is a shaped one\n"); + return -EINVAL; + } + + channel->cr_token_bucket_limit = token_limit; + config_opts.cid = CEETM_COMMAND_CHANNEL_SHAPER | channel->idx; + config_opts.dcpid = channel->dcp_idx; + config_opts.shaper_config.crtbl = token_limit; + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts); +} +EXPORT_SYMBOL(qman_ceetm_channel_set_weight); + +int qman_ceetm_channel_get_weight(struct qm_ceetm_channel *channel, + u16 *token_limit) +{ + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts; + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result; + int ret; + + query_opts.cid = CEETM_COMMAND_CHANNEL_SHAPER | channel->idx; + query_opts.dcpid = channel->dcp_idx; + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result); + if (ret | !query_result.shaper_query.crtbl) { + pr_err("This unshaped channel's uFQ wight is unavailable\n"); + return -EINVAL; + } + *token_limit = query_result.shaper_query.crtbl; + return 0; +} +EXPORT_SYMBOL(qman_ceetm_channel_get_weight); + +int qman_ceetm_channel_set_group(struct qm_ceetm_channel *channel, int group_b, + unsigned int prio_a, unsigned int prio_b) +{ + struct qm_mcc_ceetm_class_scheduler_config config_opts; + struct qm_mcr_ceetm_class_scheduler_query query_result; + int i; + + if (!prio_a | (prio_a > 7)) { + pr_err("The priority of group A is out of range\n"); + return -EINVAL; + } + if (!prio_a || (prio_b > 7)) { + pr_err("The priority of group B is out of range\n"); + return -EINVAL; + } + + if (qman_ceetm_query_class_scheduler(channel, &query_result)) { + pr_err("Can't query channel#%d's scheduler!\n", channel->idx); + return -EINVAL; + } + + config_opts.cqcid = channel->idx; + config_opts.dcpid = channel->dcp_idx; + if (!group_b) + config_opts.gpc = (u8)((1 << 6) | prio_a); + else + config_opts.gpc = (u8)((prio_b << 3) | prio_a); + + for (i = 0; i < 8; i++) + config_opts.w[i] = query_result.w[i]; + config_opts.crem = query_result.crem; + config_opts.erem = query_result.erem; + + return qman_ceetm_configure_class_scheduler(&config_opts); +} +EXPORT_SYMBOL(qman_ceetm_channel_set_group); + +int qman_ceetm_channel_get_group(struct qm_ceetm_channel *channel, int *group_b, + unsigned int *prio_a, unsigned int *prio_b) +{ + struct qm_mcr_ceetm_class_scheduler_query query_result; + + if (qman_ceetm_query_class_scheduler(channel, &query_result)) { + pr_err("Can't query channel#%d's scheduler!\n", channel->idx); + return -EINVAL; + } + *group_b = (query_result.gpc >> 6) & 0x1; + *prio_a = query_result.gpc & 0x3; + *prio_b = (query_result.gpc >> 3) & 0x3; + return 0; +} +EXPORT_SYMBOL(qman_ceetm_channel_get_group); + +#define CQ_ELIGIBILITY_MASK(n) (1 << (7 - n)) +#define CQ_A_ELIGIBILITY_MASK (1 << 8) +#define CQ_B_ELIGIBILITY_MASK (1 << 9) +int qman_ceetm_cq_claim(struct qm_ceetm_cq **cq, + struct qm_ceetm_channel *channel, unsigned int idx, + struct qm_ceetm_ccg *ccg) +{ + struct qm_ceetm_cq *p; + struct qm_mcc_ceetm_cq_config cq_config; + struct 
qm_mcc_ceetm_class_scheduler_config csch_config; + struct qm_mcr_ceetm_class_scheduler_query csch_query_result; + int i; + + if (idx > 7) { + pr_err("The independent class queue id is out of range\n"); + return -EINVAL; + } + + list_for_each_entry(p, &channel->class_queues, node) { + if (p->idx == idx) { + pr_err("The CQ#%d has been claimed!\n", idx); + return -EINVAL; + } + } + + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (!p) { + pr_err("Can't allocate memory for CQ#%d!\n", idx); + return -ENOMEM; + } + + list_add_tail(&p->node, &channel->class_queues); + p->idx = idx; + p->is_claimed = 1; + p->parent = channel; + INIT_LIST_HEAD(&p->bound_lfqids); + + if (ccg) { + cq_config.cqid = (channel->idx << 4) | idx; + cq_config.dcpid = channel->dcp_idx; + cq_config.ccgid = ccg->idx; + if (qman_ceetm_configure_cq(&cq_config)) { + pr_err("Can't configure the CQ#%d with CCGRID#%d\n", + idx, ccg->idx); + return -EINVAL; + } + } + + if (channel->shaper_enable) { + if (qman_ceetm_query_class_scheduler(channel, + &csch_query_result)) { + pr_err("Can't query channel#%d!\n", channel->idx); + return -EINVAL; + } + csch_config.cqcid = channel->idx; + csch_config.dcpid = channel->dcp_idx; + csch_config.crem = csch_query_result.crem | + CQ_ELIGIBILITY_MASK(idx); + csch_config.erem = csch_query_result.erem | + CQ_ELIGIBILITY_MASK(idx); + csch_config.gpc = csch_query_result.gpc; + for (i = 0; i < 8; i++) + csch_config.w[i] = csch_query_result.w[i]; + + if (qman_ceetm_configure_class_scheduler(&csch_config)) { + pr_err("Can't config channel scheduler to set" + " eligibility mask for CQ#%d\n", idx); + return -EINVAL; + } + } + + *cq = p; + return 0; +} +EXPORT_SYMBOL(qman_ceetm_cq_claim); + +int qman_ceetm_cq_claim_A(struct qm_ceetm_cq **cq, + struct qm_ceetm_channel *channel, unsigned int idx, + struct qm_ceetm_ccg *ccg) +{ + struct qm_ceetm_cq *p; + struct qm_mcc_ceetm_cq_config cq_config; + struct qm_mcc_ceetm_class_scheduler_config csch_config; + struct qm_mcr_ceetm_class_scheduler_query csch_query_result; + int i; + + if ((idx < 7) || (idx > 15)) { + pr_err("This grouped class queue id is out of range\n"); + return -EINVAL; + } + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (!p) { + pr_err("Can't allocate memory for CQ#%d!\n", idx); + return -ENOMEM; + } + + list_for_each_entry(p, &channel->class_queues, node) { + if (p->idx == idx) { + pr_err("The CQ#%d has been claimed!\n", idx); + return -EINVAL; + } + } + list_add_tail(&p->node, &channel->class_queues); + p->idx = idx; + p->is_claimed = 1; + p->parent = channel; + INIT_LIST_HEAD(&p->bound_lfqids); + + if (ccg) { + cq_config.cqid = (channel->idx << 4) | idx; + cq_config.dcpid = channel->dcp_idx; + cq_config.ccgid = ccg->idx; + if (qman_ceetm_configure_cq(&cq_config)) { + pr_err("Can't configure the CQ#%d with CCGRID#%d\n", + idx, ccg->idx); + return -EINVAL; + } + } + + if (channel->shaper_enable) { + if (qman_ceetm_query_class_scheduler(channel, + &csch_query_result)) { + pr_err("Can't query channel#%d!\n", channel->idx); + return -EINVAL; + } + csch_config.cqcid = channel->idx; + csch_config.dcpid = channel->dcp_idx; + csch_config.crem = csch_query_result.crem | + CQ_A_ELIGIBILITY_MASK; + csch_config.erem = csch_query_result.erem | + CQ_A_ELIGIBILITY_MASK; + csch_config.gpc = csch_query_result.gpc; + for (i = 0; i < 8; i++) + csch_config.w[i] = csch_query_result.w[i]; + if (qman_ceetm_configure_class_scheduler(&csch_config)) { + pr_err("Can't config channel scheduler to set" + " eligibility mask for CQ#%d\n", idx); + return -EINVAL; + } + } + *cq = p; + 
return 0; +} +EXPORT_SYMBOL(qman_ceetm_cq_claim_A); + +int qman_ceetm_cq_claim_B(struct qm_ceetm_cq **cq, + struct qm_ceetm_channel *channel, unsigned int idx, + struct qm_ceetm_ccg *ccg) +{ + struct qm_ceetm_cq *p; + struct qm_mcc_ceetm_cq_config cq_config; + struct qm_mcc_ceetm_class_scheduler_config csch_config; + struct qm_mcr_ceetm_class_scheduler_query csch_query_result; + int i; + + if ((idx < 11) || (idx > 15)) { + pr_err("This grouped class queue id is out of range\n"); + return -EINVAL; + } + + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (!p) { + pr_err("Can't allocate memory for CQ#%d!\n", idx); + return -ENOMEM; + } + + list_for_each_entry(p, &channel->class_queues, node) { + if (p->idx == idx) { + pr_err("The CQ#%d has been claimed!\n", idx); + return -EINVAL; + } + } + list_add_tail(&p->node, &channel->class_queues); + p->idx = idx; + p->is_claimed = 1; + p->parent = channel; + INIT_LIST_HEAD(&p->bound_lfqids); + + if (ccg) { + cq_config.cqid = (channel->idx << 4) | idx; + cq_config.dcpid = channel->dcp_idx; + cq_config.ccgid = ccg->idx; + if (qman_ceetm_configure_cq(&cq_config)) { + pr_err("Can't configure the CQ#%d with CCGRID#%d\n", + idx, ccg->idx); + return -EINVAL; + } + } + + if (channel->shaper_enable) { + if (qman_ceetm_query_class_scheduler(channel, + &csch_query_result)) { + pr_err("Can't query channel#%d!\n", channel->idx); + return -EINVAL; + } + csch_config.cqcid = channel->idx; + csch_config.dcpid = channel->dcp_idx; + csch_config.crem = csch_query_result.crem | + CQ_B_ELIGIBILITY_MASK; + csch_config.erem = csch_query_result.erem | + CQ_B_ELIGIBILITY_MASK; + csch_config.gpc = csch_query_result.gpc; + for (i = 0; i < 8; i++) + csch_config.w[i] = csch_query_result.w[i]; + if (qman_ceetm_configure_class_scheduler(&csch_config)) { + pr_err("Can't config channel scheduler to set" + " eligibility mask for CQ#%d\n", idx); + return -EINVAL; + } + } + *cq = p; + return 0; +} +EXPORT_SYMBOL(qman_ceetm_cq_claim_B); + +int qman_ceetm_cq_release(struct qm_ceetm_cq *cq) +{ + if (!list_empty(&cq->bound_lfqids)) { + pr_err("The CQ#%d has unreleased LFQID\n", cq->idx); + return -EBUSY; + } + list_del(&cq->node); + kfree(cq); + return 0; +} +EXPORT_SYMBOL(qman_ceetm_cq_release); + +int qman_ceetm_set_queue_weight(struct qm_ceetm_cq *cq, + struct qm_ceetm_weight_code *weight_code) +{ + struct qm_mcc_ceetm_class_scheduler_config config_opts; + struct qm_mcr_ceetm_class_scheduler_query query_result; + int i; + + if (qman_ceetm_query_class_scheduler(cq->parent, &query_result)) { + pr_err("Can't query channel#%d's scheduler!\n", + cq->parent->idx); + return -EINVAL; + } + + config_opts.cqcid = cq->parent->idx; + config_opts.dcpid = cq->parent->dcp_idx; + config_opts.crem = query_result.crem; + config_opts.erem = query_result.erem; + config_opts.gpc = query_result.gpc; + for (i = 0; i < 8; i++) + config_opts.w[i] = query_result.w[i]; + config_opts.w[cq->idx] = (weight_code->y << 3) | weight_code->x; + return qman_ceetm_configure_class_scheduler(&config_opts); +} +EXPORT_SYMBOL(qman_ceetm_set_queue_weight); + +int qman_ceetm_get_queue_weight(struct qm_ceetm_cq *cq, + struct qm_ceetm_weight_code *weight_code) +{ + struct qm_mcr_ceetm_class_scheduler_query query_result; + + if (qman_ceetm_query_class_scheduler(cq->parent, + &query_result)) { + pr_err("Can't get the weight code for CQ#%d!\n", cq->idx); + return -EINVAL; + } + weight_code->y = (query_result.w[cq->idx] >> 3) & 0x1F; + weight_code->x = query_result.w[cq->idx] & 0x3; + return 0; +} +EXPORT_SYMBOL(qman_ceetm_get_queue_weight); + 
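The WBFS helpers that follow convert between a weight expressed as a ratio and the packed {x,y} code consumed by the class scheduler. As a minimal illustrative sketch (not part of the original patch: the function name, the 3:1 ratio and the rounding argument are arbitrary, and error handling is pared down), a caller could combine qman_ceetm_ratio2wbfs() with qman_ceetm_set_queue_weight() roughly as follows, unpacking the code on the assumption that x occupies the low three bits and y the bits above, as per the QM_WBFS_* macros below:

static int example_set_cq_ratio(struct qm_ceetm_cq *cq)
{
	struct qm_ceetm_weight_code wc;
	unsigned int code;
	int ret;

	/* Request an effective weight of roughly 3 (a 3:1 share against a
	 * weight-1 peer); the last argument selects the rounding mode. */
	ret = qman_ceetm_ratio2wbfs(3, 1, &code, 0);
	if (ret)
		return ret;
	/* Unpack {x,y} from the packed code and program the scheduler. */
	wc.x = code & 0x7;
	wc.y = code >> 3;
	return qman_ceetm_set_queue_weight(cq, &wc);
}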
+/* The WBFS code is represent as {x,y}, the effect wieght can be calculated as: + * effective weight = 2^x / (1 - (y/64)) + * = 2^(x+6) / (64 - y) + */ +#define QM_WBFS_MAKECODE(x, y) (((y) << 3) | ((x & 0x7))) +#define QM_WBFS_CODE_X(c) ((c) & 0x7) +#define QM_WBFS_CODE_Y(c) ((c) >> 3) +static void reduce_fraction(u32 *n, u32 *d) +{ + u32 factor = 2; + u32 lesser = (*n < *d) ? *n : *d; + /* If factor exceeds the square-root of the lesser of *n and *d, + * then there's no point continuing. Proof: if there was a factor + * bigger than the square root, that would imply there exists + * another factor smaller than the square-root with which it + * multiplies to give 'lesser' - but that's a contradiction + * because the other factor would have already been found and + * divided out. + */ + while ((factor * factor) <= lesser) { + /* If 'factor' is a factor of *n and *d, divide them both + * by 'factor' as many times as possible. + */ + while (!(*n % factor) && !(*d % factor)) { + *n /= factor; + *d /= factor; + lesser /= factor; + } + if (factor == 2) + factor = 3; + else + factor += 2; + } +} + +int qman_ceetm_wbfs2ratio(unsigned int weight_code, + u32 *numerator, + u32 *denominator) +{ + *numerator = (u32) 1 << (QM_WBFS_CODE_X(weight_code) + 6); + *denominator = 64 - QM_WBFS_CODE_Y(weight_code); + reduce_fraction(numerator, denominator); + return 0; +} +EXPORT_SYMBOL(qman_ceetm_wbfs2ratio); + +/* For a given x, the weight is between 2^x (inclusive) and 2^(x+1) (exclusive). + * So find 'x' by range, and then estimate 'y' using: + * 64 - y = 2^(x + 6) / weight + * = 2^(x + 6) / (n/d) + * = d * 2^(x+6) / n + * y = 64 - (d * 2^(x+6) / n) + */ +int qman_ceetm_ratio2wbfs(u32 numerator, + u32 denominator, + unsigned int *weight_code, + int rounding) +{ + unsigned int y, x = 0; + /* search incrementing 'x' until: + * weight < 2^(x+1) + * n/d < 2^(x+1) + * n < d * 2^(x+1) + */ + while ((x < 8) && (numerator >= (denominator << (x + 1)))) + x++; + if (x >= 8) + return -ERANGE; + /* because of the subtraction, use '-rounding' */ + y = 64 - ROUNDING(denominator << (x + 6), numerator, -rounding); + if (y >= 32) + return -ERANGE; + *weight_code = QM_WBFS_MAKECODE(x, y); + return 0; +} +EXPORT_SYMBOL(qman_ceetm_ratio2wbfs); + +int qman_ceetm_cq_get_dequeue_statistics(struct qm_ceetm_cq *cq, u32 flags, + u64 *frame_count, u64 *byte_count) +{ + struct qm_mcr_ceetm_statistics_query result; + u16 cid, command_type; + enum qm_dc_portal dcp_idx; + int ret; + + cid = (cq->parent->idx << 4) | cq->idx; + dcp_idx = cq->parent->dcp_idx; + if (flags == QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER) + command_type = CEETM_QUERY_DEQUEUE_CLEAR_STATISTICS; + else + command_type = CEETM_QUERY_DEQUEUE_STATISTICS; + + ret = qman_ceetm_query_statistics(cid, dcp_idx, command_type, &result); + if (ret) { + pr_err("Can't query the statistics of CQ#%d!\n", cq->idx); + return -EINVAL; + } + + *frame_count = result.frm_cnt; + *byte_count = result.byte_cnt; + return 0; +} +EXPORT_SYMBOL(qman_ceetm_cq_get_dequeue_statistics); + +#define CEETM_LFQMT_LFQID_MSB 0xF00000 +#define CEETM_LFQMT_LFQID_LSB 0x000FFF +int qman_ceetm_lfq_claim(struct qm_ceetm_lfq **lfq, + struct qm_ceetm_cq *cq) +{ + struct qm_ceetm_lfq *p; + u32 lfqid; + int ret = 0; + struct qm_mcc_ceetm_lfqmt_config lfqmt_config; + + if (cq->parent->dcp_idx == qm_dc_portal_fman0) + ret = qman_alloc_ceetm0_lfqid(&lfqid); + if (cq->parent->dcp_idx == qm_dc_portal_fman1) + ret = qman_alloc_ceetm1_lfqid(&lfqid); + if (ret) { + pr_err("There is no lfqid avalaible for CQ#%d!\n", 
cq->idx); + return -ENODEV; + } + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return -ENOMEM; + p->idx = lfqid; + p->dctidx = (u16)(lfqid & CEETM_LFQMT_LFQID_LSB); + p->parent = cq->parent; + list_add_tail(&p->node, &cq->bound_lfqids); + + lfqmt_config.lfqid = CEETM_LFQMT_LFQID_MSB | + (cq->parent->dcp_idx << 16) | + (lfqid & CEETM_LFQMT_LFQID_LSB); + lfqmt_config.cqid = (cq->parent->idx << 4) | (cq->idx); + lfqmt_config.dctidx = p->dctidx; + if (qman_ceetm_configure_lfqmt(&lfqmt_config)) { + pr_err("Can't configure LFQMT for LFQID#%d @ CQ#%d\n", + lfqid, cq->idx); + return -EINVAL; + } + *lfq = p; + return 0; +} +EXPORT_SYMBOL(qman_ceetm_lfq_claim); + +int qman_ceetm_lfq_release(struct qm_ceetm_lfq *lfq) +{ + if (lfq->parent->dcp_idx == qm_dc_portal_fman0) + qman_release_ceetm0_lfqid(lfq->idx); + if (lfq->parent->dcp_idx == qm_dc_portal_fman1) + qman_release_ceetm1_lfqid(lfq->idx); + list_del(&lfq->node); + kfree(lfq); + return 0; +} +EXPORT_SYMBOL(qman_ceetm_lfq_release); + +int qman_ceetm_lfq_set_context(struct qm_ceetm_lfq *lfq, u64 context_a, + u32 context_b) +{ + struct qm_mcc_ceetm_dct_config dct_config; + lfq->context_a = context_a; + lfq->context_b = context_b; + dct_config.dctidx = (u16)lfq->dctidx; + dct_config.dcpid = lfq->parent->dcp_idx; + dct_config.context_b = context_b; + dct_config.context_a = context_a; + return qman_ceetm_configure_dct(&dct_config); +} +EXPORT_SYMBOL(qman_ceetm_lfq_set_context); + +int qman_ceetm_lfq_get_context(struct qm_ceetm_lfq *lfq, u64 *context_a, + u32 *context_b) +{ + struct qm_mcc_ceetm_dct_query dct_query; + struct qm_mcr_ceetm_dct_query query_result; + + dct_query.dctidx = (u16)lfq->dctidx; + dct_query.dcpid = lfq->parent->dcp_idx; + if (qman_ceetm_query_dct(&dct_query, &query_result)) { + pr_err("Can't query LFQID#%d's context!\n", lfq->idx); + return -EINVAL; + } + *context_a = query_result.context_a; + *context_b = query_result.context_b; + return 0; +} +EXPORT_SYMBOL(qman_ceetm_lfq_get_context); + +int qman_ceetm_create_fq(struct qm_ceetm_lfq *lfq, struct qman_fq *fq) +{ + spin_lock_init(&fq->fqlock); + fq->fqid = lfq->idx; + fq->flags = QMAN_FQ_FLAG_NO_MODIFY; + if (lfq->ern) + fq->cb.ern = lfq->ern; +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) + return -ENOMEM; +#endif + return 0; +} +EXPORT_SYMBOL(qman_ceetm_create_fq); + +int qman_ceetm_ccg_claim(struct qm_ceetm_ccg **ccg, + struct qm_ceetm_channel *channel, + unsigned int idx, + void (*cscn)(struct qm_ceetm_ccg *, + void *cb_ctx, + int congested), + void *cb_ctx) +{ + struct qm_ceetm_ccg *p; + + if ((idx < 0) || (idx > 15)) { + pr_err("The given ccg index is out of range\n"); + return -EINVAL; + } + + list_for_each_entry(p, &channel->ccgs, node) { + if (p->idx == idx) { + pr_err("The CCG#%d has been claimed\n", idx); + return -EINVAL; + } + } + + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (!p) { + pr_err("Can't allocate memory for CCG#%d!\n", idx); + return -ENOMEM; + } + + list_add_tail(&p->node, &channel->ccgs); + + p->idx = idx; + p->parent = channel; + p->cb = cscn; + p->cb_ctx = cb_ctx; + + *ccg = p; + return 0; +} +EXPORT_SYMBOL(qman_ceetm_ccg_claim); + +int qman_ceetm_ccg_release(struct qm_ceetm_ccg *ccg) +{ + list_del(&ccg->node); + kfree(ccg); + return 0; +} +EXPORT_SYMBOL(qman_ceetm_ccg_release); + +int qman_ceetm_ccg_set(struct qm_ceetm_ccg *ccg, u16 we_mask, + const struct qm_ceetm_ccg_params *params) +{ + struct qm_mcc_ceetm_ccgr_config config_opts; + + config_opts.ccgrid = CEETM_CCGR_CM_CONFIGURE | + (ccg->parent->idx << 
4) | ccg->idx; + config_opts.dcpid = ccg->parent->dcp_idx; + config_opts.we_mask = we_mask; + config_opts.cm_config.ctl = (params->wr_en_g << 6) | + (params->wr_en_y << 5) | + (params->wr_en_r << 4) | + (params->td_en << 3) | + (params->td_mode << 2) | + (params->cscn_en << 1) | + (params->mode); + config_opts.cm_config.oal = params->oal; + config_opts.cm_config.cs_thres = params->cs_thres_in; + config_opts.cm_config.cs_thres_x = params->cs_thres_out; + config_opts.cm_config.td_thres = params->td_thres; + config_opts.cm_config.wr_parm_g = params->wr_parm_g; + config_opts.cm_config.wr_parm_y = params->wr_parm_y; + config_opts.cm_config.wr_parm_r = params->wr_parm_r; + + return qman_ceetm_configure_ccgr(&config_opts); +} +EXPORT_SYMBOL(qman_ceetm_ccg_set); + +#define CEETM_CCGR_CTL_MASK 0x01 +int qman_ceetm_ccg_get(struct qm_ceetm_ccg *ccg, + struct qm_ceetm_ccg_params *params) +{ + struct qm_mcc_ceetm_ccgr_query query_opts; + struct qm_mcr_ceetm_ccgr_query query_result; + + query_opts.ccgrid = CEETM_CCGR_CM_QUERY | + (ccg->parent->idx << 4) | ccg->idx; + query_opts.dcpid = ccg->parent->dcp_idx; + + if (qman_ceetm_query_ccgr(&query_opts, &query_result)) { + pr_err("Can't query CCGR#%d\n", ccg->idx); + return -EINVAL; + } + + params->wr_parm_r = query_result.cm_query.wr_parm_r; + params->wr_parm_y = query_result.cm_query.wr_parm_y; + params->wr_parm_g = query_result.cm_query.wr_parm_g; + params->td_thres = query_result.cm_query.td_thres; + params->cs_thres_out = query_result.cm_query.cs_thres_x; + params->cs_thres_in = query_result.cm_query.cs_thres; + params->oal = query_result.cm_query.oal; + params->wr_en_g = (query_result.cm_query.ctl >> 6) & + CEETM_CCGR_CTL_MASK; + params->wr_en_y = (query_result.cm_query.ctl >> 5) & + CEETM_CCGR_CTL_MASK; + params->wr_en_r = (query_result.cm_query.ctl >> 4) & + CEETM_CCGR_CTL_MASK; + params->td_en = (query_result.cm_query.ctl >> 3) & + CEETM_CCGR_CTL_MASK; + params->td_mode = (query_result.cm_query.ctl >> 2) & + CEETM_CCGR_CTL_MASK; + params->cscn_en = (query_result.cm_query.ctl >> 1) & + CEETM_CCGR_CTL_MASK; + params->mode = (query_result.cm_query.ctl & CEETM_CCGR_CTL_MASK); + + return 0; +} +EXPORT_SYMBOL(qman_ceetm_ccg_get); + +int qman_ceetm_ccg_get_reject_statistics(struct qm_ceetm_ccg *ccg, u32 flags, + u64 *frame_count, u64 *byte_count) +{ + struct qm_mcr_ceetm_statistics_query result; + u16 cid, command_type; + enum qm_dc_portal dcp_idx; + int ret; + + cid = (ccg->parent->idx << 4) | ccg->idx; + dcp_idx = ccg->parent->dcp_idx; + if (flags == QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER) + command_type = CEETM_QUERY_REJECT_CLEAR_STATISTICS; + else + command_type = CEETM_QUERY_REJECT_STATISTICS; + + ret = qman_ceetm_query_statistics(cid, dcp_idx, command_type, &result); + if (ret) { + pr_err("Can't query the statistics of CCG#%d!\n", ccg->idx); + return -EINVAL; + } + + *frame_count = result.frm_cnt; + *byte_count = result.byte_cnt; + return 0; +} +EXPORT_SYMBOL(qman_ceetm_ccg_get_reject_statistics); + +#define CEETM_CSCN_TARG_SWP 0 +#define CEETM_CSCN_TARG_DCP 1 +int qman_ceetm_cscn_swp_set(struct qm_ceetm_ccg *ccg, + u16 swp_idx, + unsigned int cscn_enabled, + u16 we_mask, + const struct qm_ceetm_ccg_params *params) +{ + struct qm_mcc_ceetm_ccgr_config config_opts; + int ret; + + config_opts.ccgrid = CEETM_CCGR_CM_CONFIGURE | + (ccg->parent->idx << 4) | ccg->idx; + config_opts.dcpid = ccg->parent->dcp_idx; + config_opts.we_mask = we_mask | QM_CCGR_WE_CSCN_TUPD; + config_opts.cm_config.cscn_tupd = (cscn_enabled << 15) | + (CEETM_CSCN_TARG_SWP << 14) 
| + swp_idx; + config_opts.cm_config.ctl = (params->wr_en_g << 6) | + (params->wr_en_y << 5) | + (params->wr_en_r << 4) | + (params->td_en << 3) | + (params->td_mode << 2) | + (params->cscn_en << 1) | + (params->mode); + config_opts.cm_config.cs_thres = params->cs_thres_in; + config_opts.cm_config.cs_thres_x = params->cs_thres_out; + config_opts.cm_config.td_thres = params->td_thres; + config_opts.cm_config.wr_parm_g = params->wr_parm_g; + config_opts.cm_config.wr_parm_y = params->wr_parm_y; + config_opts.cm_config.wr_parm_r = params->wr_parm_r; + + ret = qman_ceetm_configure_ccgr(&config_opts); + if (ret) { + pr_err("Configure CSCN_TARG_SWP failed!\n"); + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL(qman_ceetm_cscn_swp_set); + +int qman_ceetm_cscn_swp_get(struct qm_ceetm_ccg *ccg, + u16 swp_idx, + unsigned int *cscn_enabled) +{ + struct qm_mcc_ceetm_ccgr_query query_opts; + struct qm_mcr_ceetm_ccgr_query query_result; + + query_opts.ccgrid = CEETM_CCGR_CM_QUERY | + (ccg->parent->idx << 4) | ccg->idx; + query_opts.dcpid = ccg->parent->dcp_idx; + + if (qman_ceetm_query_ccgr(&query_opts, &query_result)) { + pr_err("Can't query CCGR#%d\n", ccg->idx); + return -EINVAL; + } + + if (swp_idx < 63) + *cscn_enabled = (query_result.cm_query.cscn_targ_swp[0] >> + (63 - swp_idx)) & 0x1; + else + *cscn_enabled = (query_result.cm_query.cscn_targ_swp[1] >> + (127 - swp_idx)) & 0x1; + return 0; +} +EXPORT_SYMBOL(qman_ceetm_cscn_swp_get); + +int qman_ceetm_cscn_dcp_set(struct qm_ceetm_ccg *ccg, + u16 dcp_idx, + u8 vcgid, + unsigned int cscn_enabled, + u16 we_mask, + const struct qm_ceetm_ccg_params *params) +{ + struct qm_mcc_ceetm_ccgr_config config_opts; + int ret; + + config_opts.ccgrid = CEETM_CCGR_CM_CONFIGURE | + (ccg->parent->idx << 4) | ccg->idx; + config_opts.dcpid = ccg->parent->dcp_idx; + config_opts.we_mask = we_mask | QM_CCGR_WE_CSCN_TUPD | QM_CCGR_WE_CDV; + config_opts.cm_config.cdv = vcgid; + config_opts.cm_config.cscn_tupd = (cscn_enabled << 15) | + (CEETM_CSCN_TARG_DCP << 14) | + dcp_idx; + config_opts.cm_config.ctl = (params->wr_en_g << 6) | + (params->wr_en_y << 5) | + (params->wr_en_r << 4) | + (params->td_en << 3) | + (params->td_mode << 2) | + (params->cscn_en << 1) | + (params->mode); + config_opts.cm_config.cs_thres = params->cs_thres_in; + config_opts.cm_config.cs_thres_x = params->cs_thres_out; + config_opts.cm_config.td_thres = params->td_thres; + config_opts.cm_config.wr_parm_g = params->wr_parm_g; + config_opts.cm_config.wr_parm_y = params->wr_parm_y; + config_opts.cm_config.wr_parm_r = params->wr_parm_r; + + ret = qman_ceetm_configure_ccgr(&config_opts); + if (ret) { + pr_err("Configure CSCN_TARG_DCP failed!\n"); + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL(qman_ceetm_cscn_dcp_set); + +int qman_ceetm_cscn_dcp_get(struct qm_ceetm_ccg *ccg, + u16 dcp_idx, + u8 *vcgid, + unsigned int *cscn_enabled) +{ + struct qm_mcc_ceetm_ccgr_query query_opts; + struct qm_mcr_ceetm_ccgr_query query_result; + + query_opts.ccgrid = CEETM_CCGR_CM_QUERY | + (ccg->parent->idx << 4) | ccg->idx; + query_opts.dcpid = ccg->parent->dcp_idx; + + if (qman_ceetm_query_ccgr(&query_opts, &query_result)) { + pr_err("Can't query CCGR#%d\n", ccg->idx); + return -EINVAL; + } + + *vcgid = query_result.cm_query.cdv; + *cscn_enabled = (query_result.cm_query.cscn_targ_dcp >> + (7 - dcp_idx)) & 0x1; + return 0; +} +EXPORT_SYMBOL(qman_ceetm_cscn_dcp_get); + +int qman_ceetm_querycongestion(u16 *ccg_state, unsigned int dcp_idx) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal 
*p; + unsigned long irqflags __maybe_unused; + u8 res; + int i, j; + + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + + mcc = qm_mc_start(&p->p); + for (i = 0; i < 1 ; i++) { + mcc->ccgr_query.ccgrid = i; + mcc->ccgr_query.dcpid = dcp_idx; + qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY); + + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == + QM_CEETM_VERB_CCGR_QUERY); + res = mcr->result; + if (res == QM_MCR_RESULT_OK) { + for (j = 0; j < 16; j++) + *(ccg_state + j) = + mcr->ccgr_query.congestion_state.ccg_state[j]; + } else { + pr_err("QUERY CEETM CONGESTION STATE failed\n"); + return -EIO; + } + } + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return 0; +} + +int qman_set_wpm(int wpm_enable) +{ + return qm_set_wpm(wpm_enable); +} +EXPORT_SYMBOL(qman_set_wpm); + +int qman_get_wpm(int *wpm_enable) +{ + return qm_get_wpm(wpm_enable); +} +EXPORT_SYMBOL(qman_get_wpm); diff --git a/drivers/staging/fsl_qbman/qman_low.h b/drivers/staging/fsl_qbman/qman_low.h new file mode 100644 index 0000000..d517af5 --- /dev/null +++ b/drivers/staging/fsl_qbman/qman_low.h @@ -0,0 +1,1171 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "qman_private.h" + +/***************************/ +/* Portal register assists */ +/***************************/ + +/* Cache-inhibited register offsets */ +#define REG_EQCR_PI_CINH 0x0000 +#define REG_EQCR_CI_CINH 0x0004 +#define REG_EQCR_ITR 0x0008 +#define REG_DQRR_PI_CINH 0x0040 +#define REG_DQRR_CI_CINH 0x0044 +#define REG_DQRR_ITR 0x0048 +#define REG_DQRR_DCAP 0x0050 +#define REG_DQRR_SDQCR 0x0054 +#define REG_DQRR_VDQCR 0x0058 +#define REG_DQRR_PDQCR 0x005c +#define REG_MR_PI_CINH 0x0080 +#define REG_MR_CI_CINH 0x0084 +#define REG_MR_ITR 0x0088 +#define REG_CFG 0x0100 +#define REG_ISR 0x0e00 +#define REG_ITPR 0x0e14 + +/* Cache-enabled register offsets */ +#define CL_EQCR 0x0000 +#define CL_DQRR 0x1000 +#define CL_MR 0x2000 +#define CL_EQCR_PI_CENA 0x3000 +#define CL_EQCR_CI_CENA 0x3100 +#define CL_DQRR_PI_CENA 0x3200 +#define CL_DQRR_CI_CENA 0x3300 +#define CL_MR_PI_CENA 0x3400 +#define CL_MR_CI_CENA 0x3500 +#define CL_CR 0x3800 +#define CL_RR0 0x3900 +#define CL_RR1 0x3940 + +/* BTW, the drivers (and h/w programming model) already obtain the required + * synchronisation for portal accesses via lwsync(), hwsync(), and + * data-dependencies. Use of barrier()s or other order-preserving primitives + * simply degrade performance. Hence the use of the __raw_*() interfaces, which + * simply ensure that the compiler treats the portal registers as volatile (ie. + * non-coherent). */ + +/* Cache-inhibited register access. */ +#define __qm_in(qm, o) __raw_readl((qm)->addr_ci + (o)) +#define __qm_out(qm, o, val) __raw_writel((val), (qm)->addr_ci + (o)) +#define qm_in(reg) __qm_in(&portal->addr, REG_##reg) +#define qm_out(reg, val) __qm_out(&portal->addr, REG_##reg, val) + +/* Cache-enabled (index) register access */ +#define __qm_cl_touch_ro(qm, o) dcbt_ro((qm)->addr_ce + (o)) +#define __qm_cl_touch_rw(qm, o) dcbt_rw((qm)->addr_ce + (o)) +#define __qm_cl_in(qm, o) __raw_readl((qm)->addr_ce + (o)) +#define __qm_cl_out(qm, o, val) \ + do { \ + u32 *__tmpclout = (qm)->addr_ce + (o); \ + __raw_writel((val), __tmpclout); \ + dcbf(__tmpclout); \ + } while (0) +#define __qm_cl_invalidate(qm, o) dcbi((qm)->addr_ce + (o)) +#define qm_cl_touch_ro(reg) __qm_cl_touch_ro(&portal->addr, CL_##reg##_CENA) +#define qm_cl_touch_rw(reg) __qm_cl_touch_rw(&portal->addr, CL_##reg##_CENA) +#define qm_cl_in(reg) __qm_cl_in(&portal->addr, CL_##reg##_CENA) +#define qm_cl_out(reg, val) __qm_cl_out(&portal->addr, CL_##reg##_CENA, val) +#define qm_cl_invalidate(reg) __qm_cl_invalidate(&portal->addr, CL_##reg##_CENA) + +/* Cache-enabled ring access */ +#define qm_cl(base, idx) ((void *)base + ((idx) << 6)) + +/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf + * analysis, look at using the "extra" bit in the ring index registers to avoid + * cyclic issues. */ +static inline u8 cyc_diff(u8 ringsize, u8 first, u8 last) +{ + /* 'first' is included, 'last' is excluded */ + if (first <= last) + return last - first; + return ringsize + last - first; +} + +/* Portal modes. + * Enum types; + * pmode == production mode + * cmode == consumption mode, + * dmode == h/w dequeue mode. + * Enum values use 3 letter codes. First letter matches the portal mode, + * remaining two letters indicate; + * ci == cache-inhibited portal register + * ce == cache-enabled portal register + * vb == in-band valid-bit (cache-enabled) + * dc == DCA (Discrete Consumption Acknowledgement), DQRR-only + * As for "enum qm_dqrr_dmode", it should be self-explanatory. 
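 * As a concrete reading of the codes above: "qm_dqrr_pvb" names the DQRR
 * production mode in which new entries are detected via each entry's
 * in-band valid-bit, while "qm_dqrr_cdc" names the DQRR consumption mode in
 * which entries are acknowledged by DCA writes to DQRR_DCAP rather than by
 * updating a consumer-index register.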
+ */ +enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */ + qm_eqcr_pci = 0, /* PI index, cache-inhibited */ + qm_eqcr_pce = 1, /* PI index, cache-enabled */ + qm_eqcr_pvb = 2 /* valid-bit */ +}; +enum qm_eqcr_cmode { /* s/w-only */ + qm_eqcr_cci, /* CI index, cache-inhibited */ + qm_eqcr_cce /* CI index, cache-enabled */ +}; +enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */ + qm_dqrr_dpush = 0, /* SDQCR + VDQCR */ + qm_dqrr_dpull = 1 /* PDQCR */ +}; +enum qm_dqrr_pmode { /* s/w-only */ + qm_dqrr_pci, /* reads DQRR_PI_CINH */ + qm_dqrr_pce, /* reads DQRR_PI_CENA */ + qm_dqrr_pvb /* reads valid-bit */ +}; +enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */ + qm_dqrr_cci = 0, /* CI index, cache-inhibited */ + qm_dqrr_cce = 1, /* CI index, cache-enabled */ + qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgement */ +}; +enum qm_mr_pmode { /* s/w-only */ + qm_mr_pci, /* reads MR_PI_CINH */ + qm_mr_pce, /* reads MR_PI_CENA */ + qm_mr_pvb /* reads valid-bit */ +}; +enum qm_mr_cmode { /* matches QCSP_CFG::MM */ + qm_mr_cci = 0, /* CI index, cache-inhibited */ + qm_mr_cce = 1 /* CI index, cache-enabled */ +}; + + +/* ------------------------- */ +/* --- Portal structures --- */ + +#define QM_EQCR_SIZE 8 +#define QM_DQRR_SIZE 16 +#define QM_MR_SIZE 8 + +struct qm_eqcr { + struct qm_eqcr_entry *ring, *cursor; + u8 ci, available, ithresh, vbit; +#ifdef CONFIG_FSL_DPA_CHECKING + u32 busy; + enum qm_eqcr_pmode pmode; + enum qm_eqcr_cmode cmode; +#endif +}; + +struct qm_dqrr { + const struct qm_dqrr_entry *ring, *cursor; + u8 pi, ci, fill, ithresh, vbit; +#ifdef CONFIG_FSL_DPA_CHECKING + enum qm_dqrr_dmode dmode; + enum qm_dqrr_pmode pmode; + enum qm_dqrr_cmode cmode; +#endif +}; + +struct qm_mr { + const struct qm_mr_entry *ring, *cursor; + u8 pi, ci, fill, ithresh, vbit; +#ifdef CONFIG_FSL_DPA_CHECKING + enum qm_mr_pmode pmode; + enum qm_mr_cmode cmode; +#endif +}; + +struct qm_mc { + struct qm_mc_command *cr; + struct qm_mc_result *rr; + u8 rridx, vbit; +#ifdef CONFIG_FSL_DPA_CHECKING + enum { + /* Can be _mc_start()ed */ + mc_idle, + /* Can be _mc_commit()ed or _mc_abort()ed */ + mc_user, + /* Can only be _mc_retry()ed */ + mc_hw + } state; +#endif +}; + +#ifdef CONFIG_FSL_QMAN_BUG_AND_FEATURE_REV1 +/* For workarounds that require storage. The struct alignment is required for + * cases where operations on "shadow" structs need the same alignment as is + * present on the corresponding h/w data structs (specifically, there is a + * zero-bit present above the range required to address the ring, so that + * iteration can be achieved by incrementing a ring pointer and clearing the + * carry-bit). The "portal" struct needs the same alignment because this type + * goes at its head, so it has a more radical alignment requirement if this + * structure is used. 
(NB: "64" instead of "L1_CACHE_BYTES", because this + * alignment relates to the h/w interface, not the CPU cache granularity!)*/ +#define QM_PORTAL_ALIGNMENT __attribute__((aligned(32 * 64))) +struct qm_portal_bugs { + /* shadow MR ring, for QMAN9 workaround, 8-CL-aligned */ + struct qm_mr_entry mr[QM_MR_SIZE]; + /* shadow MC result, for QMAN6 and QMAN7 workarounds, CL-aligned */ + struct qm_mc_result result; + /* boolean switch for QMAN7 workaround */ + int initfq_and_sched; +} QM_PORTAL_ALIGNMENT; +#else +#define QM_PORTAL_ALIGNMENT ____cacheline_aligned +#endif + +struct qm_addr { + void __iomem *addr_ce; /* cache-enabled */ + void __iomem *addr_ci; /* cache-inhibited */ +}; + +struct qm_portal { +#ifdef CONFIG_FSL_QMAN_BUG_AND_FEATURE_REV1 + struct qm_portal_bugs bugs; +#endif + /* In the non-CONFIG_FSL_DPA_CHECKING case, the following stuff up to + * and including 'mc' fits within a cacheline (yay!). The 'config' part + * is setup-only, so isn't a cause for a concern. In other words, don't + * rearrange this structure on a whim, there be dragons ... */ + struct qm_addr addr; + struct qm_eqcr eqcr; + struct qm_dqrr dqrr; + struct qm_mr mr; + struct qm_mc mc; +} QM_PORTAL_ALIGNMENT; + + +/* ---------------- */ +/* --- EQCR API --- */ + +/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ +#define EQCR_CARRYCLEAR(p) \ + (void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6))) + +/* Bit-wise logic to convert a ring pointer to a ring index */ +static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e) +{ + return ((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1); +} + +/* Increment the 'cursor' ring pointer, taking 'vbit' into account */ +static inline void EQCR_INC(struct qm_eqcr *eqcr) +{ + /* NB: this is odd-looking, but experiments show that it generates fast + * code with essentially no branching overheads. We increment to the + * next EQCR pointer and handle overflow and 'vbit'. */ + struct qm_eqcr_entry *partial = eqcr->cursor + 1; + eqcr->cursor = EQCR_CARRYCLEAR(partial); + if (partial != eqcr->cursor) + eqcr->vbit ^= QM_EQCR_VERB_VBIT; +} + +static inline int qm_eqcr_init(struct qm_portal *portal, + enum qm_eqcr_pmode pmode, + __maybe_unused enum qm_eqcr_cmode cmode) +{ + /* This use of 'register', as well as all other occurances, is because + * it has been observed to generate much faster code with gcc than is + * otherwise the case. */ + register struct qm_eqcr *eqcr = &portal->eqcr; + u32 cfg; + u8 pi; + + eqcr->ring = portal->addr.addr_ce + CL_EQCR; + eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); + qm_cl_invalidate(EQCR_CI); + pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1); + eqcr->cursor = eqcr->ring + pi; + eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ? 
+ QM_EQCR_VERB_VBIT : 0; + eqcr->available = QM_EQCR_SIZE - 1 - + cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi); + eqcr->ithresh = qm_in(EQCR_ITR); +#ifdef CONFIG_FSL_DPA_CHECKING + eqcr->busy = 0; + eqcr->pmode = pmode; + eqcr->cmode = cmode; +#endif + cfg = (qm_in(CFG) & 0x00ffffff) | + ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */ + qm_out(CFG, cfg); + return 0; +} + +static inline void qm_eqcr_finish(struct qm_portal *portal) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + u8 pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1); + u8 ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); + + DPA_ASSERT(!eqcr->busy); + if (pi != EQCR_PTR2IDX(eqcr->cursor)) + pr_crit("losing uncommited EQCR entries\n"); + if (ci != eqcr->ci) + pr_crit("missing existing EQCR completions\n"); + if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor)) + pr_crit("EQCR destroyed unquiesced\n"); +} + +static inline struct qm_eqcr_entry *qm_eqcr_start(struct qm_portal *portal) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + DPA_ASSERT(!eqcr->busy); + if (!eqcr->available) + return NULL; +#ifdef CONFIG_FSL_DPA_CHECKING + eqcr->busy = 1; +#endif + dcbz_64(eqcr->cursor); + return eqcr->cursor; +} + +static inline void qm_eqcr_abort(struct qm_portal *portal) +{ + __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr; + DPA_ASSERT(eqcr->busy); +#ifdef CONFIG_FSL_DPA_CHECKING + eqcr->busy = 0; +#endif +} + +static inline struct qm_eqcr_entry *qm_eqcr_pend_and_next( + struct qm_portal *portal, u8 myverb) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + DPA_ASSERT(eqcr->busy); + DPA_ASSERT(eqcr->pmode != qm_eqcr_pvb); + if (eqcr->available == 1) + return NULL; + eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit; + dcbf(eqcr->cursor); + EQCR_INC(eqcr); + eqcr->available--; + dcbz_64(eqcr->cursor); + return eqcr->cursor; +} + +#define EQCR_COMMIT_CHECKS(eqcr) \ +do { \ + DPA_ASSERT(eqcr->busy); \ + DPA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); \ + DPA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); \ +} while(0) + +static inline void qm_eqcr_pci_commit(struct qm_portal *portal, u8 myverb) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + EQCR_COMMIT_CHECKS(eqcr); + DPA_ASSERT(eqcr->pmode == qm_eqcr_pci); + eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit; + EQCR_INC(eqcr); + eqcr->available--; + dcbf(eqcr->cursor); + hwsync(); + qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor)); +#ifdef CONFIG_FSL_DPA_CHECKING + eqcr->busy = 0; +#endif +} + +static inline void qm_eqcr_pce_prefetch(struct qm_portal *portal) +{ + __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr; + DPA_ASSERT(eqcr->pmode == qm_eqcr_pce); + qm_cl_invalidate(EQCR_PI); + qm_cl_touch_rw(EQCR_PI); +} + +static inline void qm_eqcr_pce_commit(struct qm_portal *portal, u8 myverb) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + EQCR_COMMIT_CHECKS(eqcr); + DPA_ASSERT(eqcr->pmode == qm_eqcr_pce); + eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit; + EQCR_INC(eqcr); + eqcr->available--; + dcbf(eqcr->cursor); + lwsync(); + qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor)); +#ifdef CONFIG_FSL_DPA_CHECKING + eqcr->busy = 0; +#endif +} + +static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + struct qm_eqcr_entry *eqcursor; + EQCR_COMMIT_CHECKS(eqcr); + DPA_ASSERT(eqcr->pmode == qm_eqcr_pvb); + lwsync(); + eqcursor = eqcr->cursor; + eqcursor->__dont_write_directly__verb = myverb | eqcr->vbit; + dcbf(eqcursor); + 
EQCR_INC(eqcr); + eqcr->available--; +#ifdef CONFIG_FSL_DPA_CHECKING + eqcr->busy = 0; +#endif +} + +static inline u8 qm_eqcr_cci_update(struct qm_portal *portal) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + u8 diff, old_ci = eqcr->ci; + DPA_ASSERT(eqcr->cmode == qm_eqcr_cci); + eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); + diff = cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); + eqcr->available += diff; + return diff; +} + +static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal) +{ + __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr; + DPA_ASSERT(eqcr->cmode == qm_eqcr_cce); + qm_cl_touch_ro(EQCR_CI); +} + +static inline u8 qm_eqcr_cce_update(struct qm_portal *portal) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + u8 diff, old_ci = eqcr->ci; + DPA_ASSERT(eqcr->cmode == qm_eqcr_cce); + eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1); + qm_cl_invalidate(EQCR_CI); + diff = cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); + eqcr->available += diff; + return diff; +} + +static inline u8 qm_eqcr_get_ithresh(struct qm_portal *portal) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + return eqcr->ithresh; +} + +static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + eqcr->ithresh = ithresh; + qm_out(EQCR_ITR, ithresh); +} + +static inline u8 qm_eqcr_get_avail(struct qm_portal *portal) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + return eqcr->available; +} + +static inline u8 qm_eqcr_get_fill(struct qm_portal *portal) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + return QM_EQCR_SIZE - 1 - eqcr->available; +} + + +/* ---------------- */ +/* --- DQRR API --- */ + +/* FIXME: many possible improvements; + * - look at changing the API to use pointer rather than index parameters now + * that 'cursor' is a pointer, + * - consider moving other parameters to pointer if it could help (ci) + */ + +#define DQRR_CARRYCLEAR(p) \ + (void *)((unsigned long)(p) & (~(unsigned long)(QM_DQRR_SIZE << 6))) + +static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e) +{ + return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1); +} + +static inline const struct qm_dqrr_entry *DQRR_INC( + const struct qm_dqrr_entry *e) +{ + return DQRR_CARRYCLEAR(e + 1); +} + +static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf) +{ + qm_out(CFG, (qm_in(CFG) & 0xff0fffff) | + ((mf & (QM_DQRR_SIZE - 1)) << 20)); +} + +static inline int qm_dqrr_init(struct qm_portal *portal, + const struct qm_portal_config *config, + enum qm_dqrr_dmode dmode, + __maybe_unused enum qm_dqrr_pmode pmode, + enum qm_dqrr_cmode cmode, u8 max_fill) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + u32 cfg; + + /* Make sure the DQRR will be idle when we enable */ + qm_out(DQRR_SDQCR, 0); + qm_out(DQRR_VDQCR, 0); + qm_out(DQRR_PDQCR, 0); + dqrr->ring = portal->addr.addr_ce + CL_DQRR; + dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1); + dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1); + dqrr->cursor = dqrr->ring + dqrr->ci; + dqrr->fill = cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi); + dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ? 
+ QM_DQRR_VERB_VBIT : 0; + dqrr->ithresh = qm_in(DQRR_ITR); +#ifdef CONFIG_FSL_DPA_CHECKING + dqrr->dmode = dmode; + dqrr->pmode = pmode; + dqrr->cmode = cmode; +#endif + /* Invalidate every ring entry before beginning */ + for (cfg = 0; cfg > QM_DQRR_SIZE; cfg++) + dcbi(qm_cl(dqrr->ring, cfg)); + cfg = (qm_in(CFG) & 0xff000f00) | + ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */ + ((dmode & 1) << 18) | /* DP */ + ((cmode & 3) << 16) | /* DCM */ + 0xa0 | /* RE+SE */ + (0 ? 0x40 : 0) | /* Ignore RP */ + (0 ? 0x10 : 0); /* Ignore SP */ + qm_out(CFG, cfg); + qm_dqrr_set_maxfill(portal, max_fill); + return 0; +} + +static inline void qm_dqrr_finish(struct qm_portal *portal) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; +#ifdef CONFIG_FSL_DPA_CHECKING + if ((dqrr->cmode != qm_dqrr_cdc) && + (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor))) + pr_crit("Ignoring completed DQRR entries\n"); +#endif +} + +static inline const struct qm_dqrr_entry *qm_dqrr_current( + struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + if (!dqrr->fill) + return NULL; + return dqrr->cursor; +} + +static inline u8 qm_dqrr_cursor(struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + return DQRR_PTR2IDX(dqrr->cursor); +} + +static inline u8 qm_dqrr_next(struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->fill); + dqrr->cursor = DQRR_INC(dqrr->cursor); + return --dqrr->fill; +} + +static inline u8 qm_dqrr_pci_update(struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + u8 diff, old_pi = dqrr->pi; + DPA_ASSERT(dqrr->pmode == qm_dqrr_pci); + dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1); + diff = cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi); + dqrr->fill += diff; + return diff; +} + +static inline void qm_dqrr_pce_prefetch(struct qm_portal *portal) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->pmode == qm_dqrr_pce); + qm_cl_invalidate(DQRR_PI); + qm_cl_touch_ro(DQRR_PI); +} + +static inline u8 qm_dqrr_pce_update(struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + u8 diff, old_pi = dqrr->pi; + DPA_ASSERT(dqrr->pmode == qm_dqrr_pce); + dqrr->pi = qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1); + diff = cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi); + dqrr->fill += diff; + return diff; +} + +static inline void qm_dqrr_pvb_update(struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + const struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi); + DPA_ASSERT(dqrr->pmode == qm_dqrr_pvb); + /* when accessing 'verb', use __raw_readb() to ensure that compiler + * inlining doesn't try to optimise out "excess reads". 
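 * The valid-bit comparison just below is what detects a newly produced
 * entry: the expected 'vbit' is toggled each time 'pi' wraps back to 0 on
 * the QM_DQRR_SIZE-entry ring, so an entry left over from the previous lap
 * still carries the stale valid-bit value and fails the comparison.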
*/ + if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) { + dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1); + if (!dqrr->pi) + dqrr->vbit ^= QM_DQRR_VERB_VBIT; + dqrr->fill++; + } +} + +static inline void qm_dqrr_cci_consume(struct qm_portal *portal, u8 num) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cci); + dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1); + qm_out(DQRR_CI_CINH, dqrr->ci); +} + +static inline void qm_dqrr_cci_consume_to_current(struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cci); + dqrr->ci = DQRR_PTR2IDX(dqrr->cursor); + qm_out(DQRR_CI_CINH, dqrr->ci); +} + +static inline void qm_dqrr_cce_prefetch(struct qm_portal *portal) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cce); + qm_cl_invalidate(DQRR_CI); + qm_cl_touch_rw(DQRR_CI); +} + +static inline void qm_dqrr_cce_consume(struct qm_portal *portal, u8 num) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cce); + dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1); + qm_cl_out(DQRR_CI, dqrr->ci); +} + +static inline void qm_dqrr_cce_consume_to_current(struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cce); + dqrr->ci = DQRR_PTR2IDX(dqrr->cursor); + qm_cl_out(DQRR_CI, dqrr->ci); +} + +static inline void qm_dqrr_cdc_consume_1(struct qm_portal *portal, u8 idx, + int park) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); + DPA_ASSERT(idx < QM_DQRR_SIZE); + qm_out(DQRR_DCAP, (0 << 8) | /* S */ + ((park ? 1 : 0) << 6) | /* PK */ + idx); /* DCAP_CI */ +} + +static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal, + const struct qm_dqrr_entry *dq, + int park) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; + u8 idx = DQRR_PTR2IDX(dq); + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); + DPA_ASSERT((dqrr->ring + idx) == dq); + DPA_ASSERT(idx < QM_DQRR_SIZE); + qm_out(DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */ + ((park ? 
1 : 0) << 6) | /* DQRR_DCAP::PK */ + idx); /* DQRR_DCAP::DCAP_CI */ +} + +static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u16 bitmask) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); + qm_out(DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */ + ((u32)bitmask << 16)); /* DQRR_DCAP::DCAP_CI */ +} + +static inline u8 qm_dqrr_cdc_cci(struct qm_portal *portal) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); + return qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1); +} + +static inline void qm_dqrr_cdc_cce_prefetch(struct qm_portal *portal) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); + qm_cl_invalidate(DQRR_CI); + qm_cl_touch_ro(DQRR_CI); +} + +static inline u8 qm_dqrr_cdc_cce(struct qm_portal *portal) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); + return qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1); +} + +static inline u8 qm_dqrr_get_ci(struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc); + return dqrr->ci; +} + +static inline void qm_dqrr_park(struct qm_portal *portal, u8 idx) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc); + qm_out(DQRR_DCAP, (0 << 8) | /* S */ + (1 << 6) | /* PK */ + (idx & (QM_DQRR_SIZE - 1))); /* DCAP_CI */ +} + +static inline void qm_dqrr_park_current(struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc); + qm_out(DQRR_DCAP, (0 << 8) | /* S */ + (1 << 6) | /* PK */ + DQRR_PTR2IDX(dqrr->cursor)); /* DCAP_CI */ +} + +static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr) +{ + qm_out(DQRR_SDQCR, sdqcr); +} + +static inline u32 qm_dqrr_sdqcr_get(struct qm_portal *portal) +{ + return qm_in(DQRR_SDQCR); +} + +static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr) +{ + qm_out(DQRR_VDQCR, vdqcr); +} + +static inline u32 qm_dqrr_vdqcr_get(struct qm_portal *portal) +{ + return qm_in(DQRR_VDQCR); +} + +static inline void qm_dqrr_pdqcr_set(struct qm_portal *portal, u32 pdqcr) +{ + qm_out(DQRR_PDQCR, pdqcr); +} + +static inline u32 qm_dqrr_pdqcr_get(struct qm_portal *portal) +{ + return qm_in(DQRR_PDQCR); +} + +static inline u8 qm_dqrr_get_ithresh(struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + return dqrr->ithresh; +} + +static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh) +{ + qm_out(DQRR_ITR, ithresh); +} + +static inline u8 qm_dqrr_get_maxfill(struct qm_portal *portal) +{ + return (qm_in(CFG) & 0x00f00000) >> 20; +} + + +/* -------------- */ +/* --- MR API --- */ + +#define MR_CARRYCLEAR(p) \ + (void *)((unsigned long)(p) & (~(unsigned long)(QM_MR_SIZE << 6))) + +static inline u8 MR_PTR2IDX(const struct qm_mr_entry *e) +{ + return ((uintptr_t)e >> 6) & (QM_MR_SIZE - 1); +} + +static inline const struct qm_mr_entry *MR_INC(const struct qm_mr_entry *e) +{ + return MR_CARRYCLEAR(e + 1); +} + +#ifdef CONFIG_FSL_QMAN_BUG_AND_FEATURE_REV1 +static inline void __mr_copy_and_fixup(struct qm_portal *p, u8 idx) +{ + if (qman_ip_rev == QMAN_REV10) { + struct qm_mr_entry *shadow = qm_cl(p->bugs.mr, idx); + struct qm_mr_entry *res = qm_cl(p->mr.ring, idx); + copy_words(shadow, res, sizeof(*res)); + /* Bypass the QM_MR_RC_*** definitions, and check the byte value + * 
directly to handle the erratum. */ + if (shadow->ern.rc == 0x06) + shadow->ern.rc = 0x60; + } +} +#else +#define __mr_copy_and_fixup(p, idx) do { ; } while (0) +#endif + +static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode, + enum qm_mr_cmode cmode) +{ + register struct qm_mr *mr = &portal->mr; + u32 cfg; + int loop; + +#ifdef CONFIG_FSL_QMAN_BUG_AND_FEATURE_REV1 + if ((qman_ip_rev == QMAN_REV10) && (pmode != qm_mr_pvb)) { + pr_err("Qman is rev1, so QMAN9 workaround requires 'pvb'\n"); + return -EINVAL; + } +#endif + mr->ring = portal->addr.addr_ce + CL_MR; + mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1); + mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1); +#ifdef CONFIG_FSL_QMAN_BUG_AND_FEATURE_REV1 + if (qman_ip_rev == QMAN_REV10) + /* Situate the cursor in the shadow ring */ + mr->cursor = portal->bugs.mr + mr->ci; + else +#endif + mr->cursor = mr->ring + mr->ci; + mr->fill = cyc_diff(QM_MR_SIZE, mr->ci, mr->pi); + mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0; + mr->ithresh = qm_in(MR_ITR); +#ifdef CONFIG_FSL_DPA_CHECKING + mr->pmode = pmode; + mr->cmode = cmode; +#endif + /* Only new entries get the copy-and-fixup treatment from + * qm_mr_pvb_update(), so perform it here for any stale entries. */ + for (loop = 0; loop < mr->fill; loop++) + __mr_copy_and_fixup(portal, (mr->ci + loop) & (QM_MR_SIZE - 1)); + cfg = (qm_in(CFG) & 0xfffff0ff) | + ((cmode & 1) << 8); /* QCSP_CFG:MM */ + qm_out(CFG, cfg); + return 0; +} + +static inline void qm_mr_finish(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + if (mr->ci != MR_PTR2IDX(mr->cursor)) + pr_crit("Ignoring completed MR entries\n"); +} + +static inline const struct qm_mr_entry *qm_mr_current(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + if (!mr->fill) + return NULL; + return mr->cursor; +} + +static inline u8 qm_mr_cursor(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + return MR_PTR2IDX(mr->cursor); +} + +static inline u8 qm_mr_next(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + DPA_ASSERT(mr->fill); + mr->cursor = MR_INC(mr->cursor); + return --mr->fill; +} + +static inline u8 qm_mr_pci_update(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + u8 diff, old_pi = mr->pi; + DPA_ASSERT(mr->pmode == qm_mr_pci); + mr->pi = qm_in(MR_PI_CINH); + diff = cyc_diff(QM_MR_SIZE, old_pi, mr->pi); + mr->fill += diff; + return diff; +} + +static inline void qm_mr_pce_prefetch(struct qm_portal *portal) +{ + __maybe_unused register struct qm_mr *mr = &portal->mr; + DPA_ASSERT(mr->pmode == qm_mr_pce); + qm_cl_invalidate(MR_PI); + qm_cl_touch_ro(MR_PI); +} + +static inline u8 qm_mr_pce_update(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + u8 diff, old_pi = mr->pi; + DPA_ASSERT(mr->pmode == qm_mr_pce); + mr->pi = qm_cl_in(MR_PI) & (QM_MR_SIZE - 1); + diff = cyc_diff(QM_MR_SIZE, old_pi, mr->pi); + mr->fill += diff; + return diff; +} + +static inline void qm_mr_pvb_update(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi); + DPA_ASSERT(mr->pmode == qm_mr_pvb); + /* when accessing 'verb', use __raw_readb() to ensure that compiler + * inlining doesn't try to optimise out "excess reads". 
*/ + if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) { + __mr_copy_and_fixup(portal, mr->pi); + mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1); + if (!mr->pi) + mr->vbit ^= QM_MR_VERB_VBIT; + mr->fill++; + res = MR_INC(res); + } + dcbit_ro(res); +} + +static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num) +{ + register struct qm_mr *mr = &portal->mr; + DPA_ASSERT(mr->cmode == qm_mr_cci); + mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1); + qm_out(MR_CI_CINH, mr->ci); +} + +static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + DPA_ASSERT(mr->cmode == qm_mr_cci); + mr->ci = MR_PTR2IDX(mr->cursor); + qm_out(MR_CI_CINH, mr->ci); +} + +static inline void qm_mr_cce_prefetch(struct qm_portal *portal) +{ + __maybe_unused register struct qm_mr *mr = &portal->mr; + DPA_ASSERT(mr->cmode == qm_mr_cce); + qm_cl_invalidate(MR_CI); + qm_cl_touch_rw(MR_CI); +} + +static inline void qm_mr_cce_consume(struct qm_portal *portal, u8 num) +{ + register struct qm_mr *mr = &portal->mr; + DPA_ASSERT(mr->cmode == qm_mr_cce); + mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1); + qm_cl_out(MR_CI, mr->ci); +} + +static inline void qm_mr_cce_consume_to_current(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + DPA_ASSERT(mr->cmode == qm_mr_cce); + mr->ci = MR_PTR2IDX(mr->cursor); + qm_cl_out(MR_CI, mr->ci); +} + +static inline u8 qm_mr_get_ci(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + return mr->ci; +} + +static inline u8 qm_mr_get_ithresh(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + return mr->ithresh; +} + +static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh) +{ + qm_out(MR_ITR, ithresh); +} + + +/* ------------------------------ */ +/* --- Management command API --- */ + +static inline int qm_mc_init(struct qm_portal *portal) +{ + register struct qm_mc *mc = &portal->mc; + mc->cr = portal->addr.addr_ce + CL_CR; + mc->rr = portal->addr.addr_ce + CL_RR0; + mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) & + QM_MCC_VERB_VBIT) ? 0 : 1; + mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0; +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_idle; +#endif + return 0; +} + +static inline void qm_mc_finish(struct qm_portal *portal) +{ + __maybe_unused register struct qm_mc *mc = &portal->mc; + DPA_ASSERT(mc->state == mc_idle); +#ifdef CONFIG_FSL_DPA_CHECKING + if (mc->state != mc_idle) + pr_crit("Losing incomplete MC command\n"); +#endif +} + +static inline struct qm_mc_command *qm_mc_start(struct qm_portal *portal) +{ + register struct qm_mc *mc = &portal->mc; + DPA_ASSERT(mc->state == mc_idle); +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_user; +#endif + dcbz_64(mc->cr); + return mc->cr; +} + +static inline void qm_mc_abort(struct qm_portal *portal) +{ + __maybe_unused register struct qm_mc *mc = &portal->mc; + DPA_ASSERT(mc->state == mc_user); +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_idle; +#endif +} + +static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb) +{ + register struct qm_mc *mc = &portal->mc; + struct qm_mc_result *rr = mc->rr + mc->rridx; + DPA_ASSERT(mc->state == mc_user); + lwsync(); +#ifdef CONFIG_FSL_QMAN_BUG_AND_FEATURE_REV1 + if ((qman_ip_rev == QMAN_REV10) && ((myverb & QM_MCC_VERB_MASK) == + QM_MCC_VERB_INITFQ_SCHED)) { + u32 fqid = mc->cr->initfq.fqid; + /* Do two commands to avoid the hw bug. 
Note, we poll locally + * rather than using qm_mc_result() because from a DPA_CHECKING + * perspective, we don't want to appear to have "finished" until + * both commands are done. */ + mc->cr->__dont_write_directly__verb = mc->vbit | + QM_MCC_VERB_INITFQ_PARKED; + dcbf(mc->cr); + portal->bugs.initfq_and_sched = 1; + do { + dcbit_ro(rr); + } while (!__raw_readb(&rr->verb)); +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_idle; +#endif + if (rr->result != QM_MCR_RESULT_OK) { +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_hw; +#endif + return; + } + mc->rridx ^= 1; + mc->vbit ^= QM_MCC_VERB_VBIT; + rr = mc->rr + mc->rridx; + dcbz_64(mc->cr); + mc->cr->alterfq.fqid = fqid; + lwsync(); + myverb = QM_MCC_VERB_ALTER_SCHED; + } else + portal->bugs.initfq_and_sched = 0; +#endif + mc->cr->__dont_write_directly__verb = myverb | mc->vbit; + dcbf(mc->cr); + dcbit_ro(rr); +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_hw; +#endif +} + +static inline struct qm_mc_result *qm_mc_result(struct qm_portal *portal) +{ + register struct qm_mc *mc = &portal->mc; + struct qm_mc_result *rr = mc->rr + mc->rridx; + DPA_ASSERT(mc->state == mc_hw); + /* The inactive response register's verb byte always returns zero until + * its command is submitted and completed. This includes the valid-bit, + * in case you were wondering... */ + if (!__raw_readb(&rr->verb)) { + dcbit_ro(rr); + return NULL; + } +#ifdef CONFIG_FSL_QMAN_BUG_AND_FEATURE_REV1 + if (qman_ip_rev == QMAN_REV10) { + if ((__raw_readb(&rr->verb) & QM_MCR_VERB_MASK) == + QM_MCR_VERB_QUERYFQ) { + void *misplaced = (void *)rr + 50; + copy_words(&portal->bugs.result, rr, sizeof(*rr)); + rr = &portal->bugs.result; + copy_shorts(&rr->queryfq.fqd.td, misplaced, + sizeof(rr->queryfq.fqd.td)); + } else if (portal->bugs.initfq_and_sched) { + /* We split the user-requested command, make the final + * result match the requested type. */ + copy_words(&portal->bugs.result, rr, sizeof(*rr)); + rr = &portal->bugs.result; + rr->verb = (rr->verb & QM_MCR_VERB_RRID) | + QM_MCR_VERB_INITFQ_SCHED; + } + } +#endif + mc->rridx ^= 1; + mc->vbit ^= QM_MCC_VERB_VBIT; +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_idle; +#endif + return rr; +} + + +/* ------------------------------------- */ +/* --- Portal interrupt register API --- */ + +static inline int qm_isr_init(__always_unused struct qm_portal *portal) +{ + return 0; +} + +static inline void qm_isr_finish(__always_unused struct qm_portal *portal) +{ +} + +static inline void qm_isr_set_iperiod(struct qm_portal *portal, u16 iperiod) +{ + qm_out(ITPR, iperiod); +} + +static inline u32 __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n) +{ + return __qm_in(&portal->addr, REG_ISR + (n << 2)); +} + +static inline void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n, + u32 val) +{ + __qm_out(&portal->addr, REG_ISR + (n << 2), val); +} diff --git a/drivers/staging/fsl_qbman/qman_private.h b/drivers/staging/fsl_qbman/qman_private.h new file mode 100644 index 0000000..e43a41a --- /dev/null +++ b/drivers/staging/fsl_qbman/qman_private.h @@ -0,0 +1,300 @@ +/* Copyright 2008-2012 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "dpa_sys.h" +#include + +#if !defined(CONFIG_FSL_QMAN_FQ_LOOKUP) && defined(CONFIG_PPC64) +#error "_PPC64 requires _FSL_QMAN_FQ_LOOKUP" +#endif + + /* ----------------- */ + /* Congestion Groups */ + /* ----------------- */ +/* This wrapper represents a bit-array for the state of the 256 Qman congestion + * groups. Is also used as a *mask* for congestion groups, eg. so we ignore + * those that don't concern us. We harness the structure and accessor details + * already used in the management command to query congestion groups. 
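 * For example, after qman_cgrs_init(&c) every group reads back as clear;
 * qman_cgrs_set(&c, 42) then makes qman_cgrs_get(&c, 42) non-zero. Bits are
 * stored MSB-first within each 32-bit word (0x80000000 >> shift), matching
 * the layout used by the congestion-query management command.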
*/ +struct qman_cgrs { + struct __qm_mcr_querycongestion q; +}; +static inline void qman_cgrs_init(struct qman_cgrs *c) +{ + memset(c, 0, sizeof(*c)); +} +static inline void qman_cgrs_fill(struct qman_cgrs *c) +{ + memset(c, 0xff, sizeof(*c)); +} +static inline int qman_cgrs_get(struct qman_cgrs *c, int num) +{ + return QM_MCR_QUERYCONGESTION(&c->q, num); +} +static inline void qman_cgrs_set(struct qman_cgrs *c, int num) +{ + c->q.__state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num)); +} +static inline void qman_cgrs_unset(struct qman_cgrs *c, int num) +{ + c->q.__state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num)); +} +static inline int qman_cgrs_next(struct qman_cgrs *c, int num) +{ + while ((++num < __CGR_NUM) && !qman_cgrs_get(c, num)) + ; + return num; +} +static inline void qman_cgrs_cp(struct qman_cgrs *dest, + const struct qman_cgrs *src) +{ + memcpy(dest, src, sizeof(*dest)); +} +static inline void qman_cgrs_and(struct qman_cgrs *dest, + const struct qman_cgrs *a, const struct qman_cgrs *b) +{ + int ret; + u32 *_d = dest->q.__state; + const u32 *_a = a->q.__state; + const u32 *_b = b->q.__state; + for (ret = 0; ret < 8; ret++) + *(_d++) = *(_a++) & *(_b++); +} +static inline void qman_cgrs_xor(struct qman_cgrs *dest, + const struct qman_cgrs *a, const struct qman_cgrs *b) +{ + int ret; + u32 *_d = dest->q.__state; + const u32 *_a = a->q.__state; + const u32 *_b = b->q.__state; + for (ret = 0; ret < 8; ret++) + *(_d++) = *(_a++) ^ *(_b++); +} + +#define qman_cgrs_for_each_1(cgr, cgrs) \ + for ((cgr) = -1; (cgr) = qman_cgrs_next((cgrs), (cgr)),\ + (cgr) < __CGR_NUM;) + +/* used by CCSR and portal interrupt code */ +enum qm_isr_reg { + qm_isr_status = 0, + qm_isr_enable = 1, + qm_isr_disable = 2, + qm_isr_inhibit = 3 +}; + +struct qm_portal_config { + /* Corenet portal addresses; + * [0]==cache-enabled, [1]==cache-inhibited. */ + __iomem void *addr_virt[2]; + struct resource addr_phys[2]; + struct device_node *node; + /* Allow these to be joined in lists */ + struct list_head list; + /* User-visible portal configuration settings */ + struct qman_portal_config public_cfg; +}; + +/* Revision info (for errata and feature handling) */ +#define QMAN_REV10 0x0100 +#define QMAN_REV11 0x0101 +#define QMAN_REV12 0x0102 +#define QMAN_REV20 0x0200 +#define QMAN_REV30 0x0300 +extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */ +extern u32 qman_clk; + +#ifdef CONFIG_FSL_QMAN_CONFIG +/* Hooks from qman_driver.c to qman_config.c */ +int qman_init_ccsr(struct device_node *node); +void qman_liodn_fixup(u16 channel); +int qman_set_sdest(u16 channel, unsigned int cpu_idx); +#endif + +int qm_set_wpm(int wpm); +int qm_get_wpm(int *wpm); + +/* Hooks from qman_driver.c in to qman_high.c */ +struct qman_portal *qman_create_affine_portal( + const struct qm_portal_config *config, + const struct qman_cgrs *cgrs); +struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect); +const struct qm_portal_config *qman_destroy_affine_portal(void); + +/* This CGR feature is supported by h/w and required by unit-tests and the + * debugfs hooks, so is implemented in the driver. However it allows an explicit + * corruption of h/w fields by s/w that are usually incorruptible (because the + * counters are usually maintained entirely within h/w). As such, we declare + * this API internally. 
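An illustrative usage sketch (not from the driver) of the congestion-group helpers above: intersect a congestion snapshot with a mask of the groups the caller cares about, then walk the set bits.

static void example_walk_congested(const struct qman_cgrs *snapshot,
				   const struct qman_cgrs *interest)
{
	struct qman_cgrs both;
	int cgr;

	/* both = snapshot & interest, then iterate over the 1-bits */
	qman_cgrs_and(&both, snapshot, interest);
	qman_cgrs_for_each_1(cgr, &both)
		pr_info("CGR %d is congested and of interest\n", cgr);
}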
*/ +int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt, + struct qm_mcr_cgrtestwrite *result); + +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP +/* If the fq object pointer is greater than the size of context_b field, + * than a lookup table is required. */ +int qman_setup_fq_lookup_table(size_t num_entries); +#endif + +/*************************************************/ +/* QMan s/w corenet portal, low-level i/face */ +/*************************************************/ + +/* Note: most functions are only used by the high-level interface, so are + * inlined from qman_low.h. The stuff below is for use by other parts of the + * driver. */ + +/* For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one + * dequeue TYPE. Choose TOKEN (8-bit). + * If SOURCE == CHANNELS, + * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n). + * You can choose DEDICATED_PRECEDENCE if the portal channel should have + * priority. + * If SOURCE == SPECIFICWQ, + * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the + * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the + * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the + * same value. + */ +#define QM_SDQCR_SOURCE_CHANNELS 0x0 +#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000 +#define QM_SDQCR_COUNT_EXACT1 0x0 +#define QM_SDQCR_COUNT_UPTO3 0x20000000 +#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000 +#define QM_SDQCR_TYPE_MASK 0x03000000 +#define QM_SDQCR_TYPE_NULL 0x0 +#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000 +#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000 +#define QM_SDQCR_TYPE_ACTIVE 0x03000000 +#define QM_SDQCR_TOKEN_MASK 0x00ff0000 +#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16) +#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff) +#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000 +#if 0 /* These are defined in the external fsl_qman.h API */ +#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff +#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n)) +#endif +#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7 +#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000 +#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4) +#define QM_SDQCR_SPECIFICWQ_WQ(n) (n) + +/* For qm_dqrr_vdqcr_set(); Choose one PRECEDENCE. EXACT is optional. Use + * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use + * FQID(n) to fill in the frame queue ID. */ +#if 0 /* These are defined in the external fsl_qman.h API */ +#define QM_VDQCR_PRECEDENCE_VDQCR 0x0 +#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000 +#define QM_VDQCR_EXACT 0x40000000 +#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000 +#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24) +#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f) +#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0) +#endif +#define QM_VDQCR_FQID_MASK 0x00ffffff +#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK) + +/* For qm_dqrr_pdqcr_set(); Choose one MODE. Choose one COUNT. + * If MODE==SCHEDULED + * Choose SCHEDULED_CHANNELS or SCHEDULED_SPECIFICWQ. Choose one dequeue TYPE. + * If CHANNELS, + * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL() channels. + * You can choose DEDICATED_PRECEDENCE if the portal channel should have + * priority. + * If SPECIFICWQ, + * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the + * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the + * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the + * same value. + * If MODE==UNSCHEDULED + * Choose FQID(). 
+ */ +#define QM_PDQCR_MODE_SCHEDULED 0x0 +#define QM_PDQCR_MODE_UNSCHEDULED 0x80000000 +#define QM_PDQCR_SCHEDULED_CHANNELS 0x0 +#define QM_PDQCR_SCHEDULED_SPECIFICWQ 0x40000000 +#define QM_PDQCR_COUNT_EXACT1 0x0 +#define QM_PDQCR_COUNT_UPTO3 0x20000000 +#define QM_PDQCR_DEDICATED_PRECEDENCE 0x10000000 +#define QM_PDQCR_TYPE_MASK 0x03000000 +#define QM_PDQCR_TYPE_NULL 0x0 +#define QM_PDQCR_TYPE_PRIO_QOS 0x01000000 +#define QM_PDQCR_TYPE_ACTIVE_QOS 0x02000000 +#define QM_PDQCR_TYPE_ACTIVE 0x03000000 +#define QM_PDQCR_CHANNELS_DEDICATED 0x00008000 +#define QM_PDQCR_CHANNELS_POOL(n) (0x00008000 >> (n)) +#define QM_PDQCR_SPECIFICWQ_MASK 0x000000f7 +#define QM_PDQCR_SPECIFICWQ_DEDICATED 0x00000000 +#define QM_PDQCR_SPECIFICWQ_POOL(n) ((n) << 4) +#define QM_PDQCR_SPECIFICWQ_WQ(n) (n) +#define QM_PDQCR_FQID(n) ((n) & 0xffffff) + +/* Used by all portal interrupt registers except 'inhibit'. NB, some of these + * definitions are exported for use by the qman_irqsource_***() APIs, so are + * commented-out here. */ +#define QM_PIRQ_DQAVAIL 0x0000ffff /* Channels with frame availability */ +#if 0 +#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */ +#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */ +#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */ +#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */ +#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */ +/* This mask contains all the interrupt sources that need handling except DQRI, + * ie. that if present should trigger slow-path processing. */ +#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \ + QM_PIRQ_MRI) +#endif +/* The DQAVAIL interrupt fields break down into these bits; */ +#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */ +#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */ +#define QM_DQAVAIL_MASK 0xffff +/* This mask contains all the "irqsource" bits visible to API users */ +#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI) + +/* These are qm__(). So for example, qm_disable_write() means "write + * the disable register" rather than "disable the ability to write". */ +#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status) +#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m) +#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable) +#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v) +#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable) +#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v) +/* TODO: unfortunate name-clash here, reword? */ +#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1) +#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0) + +/* CEETM related */ +#define QMAN_CEETM_MAX 2 +extern struct qm_ceetm qman_ceetms[QMAN_CEETM_MAX]; +int qman_sp_enable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal); +int qman_sp_disable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal); +int qman_ceetm_set_prescaler(enum qm_dc_portal portal); +int qman_ceetm_get_prescaler(u16 *pres); +int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid, + struct qm_mcr_ceetm_cq_query *cq_query); diff --git a/drivers/staging/fsl_qbman/qman_test.c b/drivers/staging/fsl_qbman/qman_test.c new file mode 100644 index 0000000..ea39449 --- /dev/null +++ b/drivers/staging/fsl_qbman/qman_test.c @@ -0,0 +1,60 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. 
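An illustrative sketch (not from the driver) of composing an SDQCR value from the QM_SDQCR_*** fields defined in qman_private.h above. QM_SDQCR_CHANNELS_POOL() comes from the external fsl_qman.h API as noted, and the exact qm_dqrr_sdqcr_set() prototype is not shown here, so the call is only indicative.

static void example_sdqcr(struct qm_portal *portal)
{
	/* Up to 3 frames per dequeue command, priority/QoS dequeue type,
	 * sourced from the dedicated channel plus pool channel 3, with DQRR
	 * entries tagged with token 0x5a. */
	u32 sdqcr = QM_SDQCR_SOURCE_CHANNELS |
		    QM_SDQCR_COUNT_UPTO3 |
		    QM_SDQCR_TYPE_PRIO_QOS |
		    QM_SDQCR_TOKEN_SET(0x5a) |
		    QM_SDQCR_CHANNELS_DEDICATED |
		    QM_SDQCR_CHANNELS_POOL(3);

	qm_dqrr_sdqcr_set(portal, sdqcr);	/* assumed (portal, value) signature */
}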
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_test.h" + +MODULE_AUTHOR("Geoff Thorpe"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Qman testing"); + +static int test_init(void) +{ + int loop = 1; + while(loop--) { +#ifdef CONFIG_FSL_QMAN_TEST_STASH_POTATO + qman_test_hotpotato(); +#endif +#ifdef CONFIG_FSL_QMAN_TEST_HIGH + qman_test_high(); +#endif +#ifdef CONFIG_FSL_QMAN_TEST_ERRATA + qman_test_errata(); +#endif + } + return 0; +} + +static void test_exit(void) +{ +} + +module_init(test_init); +module_exit(test_exit); diff --git a/drivers/staging/fsl_qbman/qman_test.h b/drivers/staging/fsl_qbman/qman_test.h new file mode 100644 index 0000000..082d9f2 --- /dev/null +++ b/drivers/staging/fsl_qbman/qman_test.h @@ -0,0 +1,84 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +void qman_test_hotpotato(void); +void qman_test_high(void); +void qman_test_errata(void); +void qman_test_fqrange(void); + +static inline void __hexdump(unsigned long start, unsigned long end, + unsigned long p, size_t sz, const unsigned char *c) +{ + while (start < end) { + unsigned int pos = 0; + char buf[64]; + int nl = 0; + pos += sprintf(buf + pos, "%08lx: ", start); + do { + if ((start < p) || (start >= (p + sz))) + pos += sprintf(buf + pos, ".."); + else + pos += sprintf(buf + pos, "%02x", *(c++)); + if (!(++start & 15)) { + buf[pos++] = '\n'; + nl = 1; + } else { + nl = 0; + if(!(start & 1)) + buf[pos++] = ' '; + if(!(start & 3)) + buf[pos++] = ' '; + } + } while (start & 15); + if (!nl) + buf[pos++] = '\n'; + buf[pos] = '\0'; + pr_info("%s", buf); + } +} +static inline void hexdump(const void *ptr, size_t sz) +{ + unsigned long p = (unsigned long)ptr; + unsigned long start = p & ~(unsigned long)15; + unsigned long end = (p + sz + 15) & ~(unsigned long)15; + const unsigned char *c = ptr; + __hexdump(start, end, p, sz, c); +} diff --git a/drivers/staging/fsl_qbman/qman_test_errata.c b/drivers/staging/fsl_qbman/qman_test_errata.c new file mode 100644 index 0000000..0f44cad --- /dev/null +++ b/drivers/staging/fsl_qbman/qman_test_errata.c @@ -0,0 +1,247 @@ +/* Copyright 2009-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
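An illustrative use (not from the driver) of the hexdump() helper defined in qman_test.h above; it rounds the requested region out to whole 16-byte lines and prints bytes outside the region as "..".

static void example_hexdump(void)
{
	u8 buf[32];
	int i;

	for (i = 0; i < 32; i++)
		buf[i] = i;
	/* Dumps the 16-byte-aligned lines covering buf[2..27]; bytes of those
	 * lines that fall outside the requested region print as "..". */
	hexdump(buf + 2, 26);
}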
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_test.h" + +/* Waiting on a model fix from virtutech */ +#if 0 +/*********************/ +/* generic utilities */ +/*********************/ + +static int do_enqueues(struct qman_fq *fq, const struct qm_fd *fds, int num) +{ + int ret = 0; + u32 flags = QMAN_ENQUEUE_FLAG_WAIT; + while (num-- && !ret) { + if (!num) + flags |= QMAN_ENQUEUE_FLAG_WAIT_SYNC; + pr_info("about to enqueue\n"); + ret = qman_enqueue(fq, fds++, flags); + } + return ret; +} + +/***************************/ +/* "tdthresh" test (QMAN6) */ +/***************************/ + +/* First thresh == 201 * (2^21) == 421527552 (0x19200000) */ +#define THRESH_MANT 201 +#define THRESH_EXP 21 + +/* first three equal thresh, fourth takes us over */ +static const struct qm_fd td_eq[] = { + QM_FD_FMT_20(0, 0x34, 0x87654321, QM_FD_SG, 0, 79321), + QM_FD_FMT_29(0, 0x34, 0x87654321, QM_FD_COMPOUND, 29923679), + QM_FD_FMT_29(0, 0x0d, 0xacadabba, QM_FD_CONTIG_BIG, 391524552), + QM_FD_FMT_20(0, 0x0b, 0x0fa10ada, QM_FD_CONTIG, 0, 1), + QM_FD_FMT_20(0, 0x0b, 0x0fa10ada, QM_FD_CONTIG, 0, 1), +}; + +struct tdthresh_fq { + struct qman_fq fq; + int got_ern; + int num_dqrr; +}; + +static enum qman_cb_dqrr_result cb_dqrr_tdthresh(struct qman_portal *p, + struct qman_fq *__fq, + const struct qm_dqrr_entry *dqrr) +{ + struct tdthresh_fq *t = (void *)__fq; + t->num_dqrr++; + return qman_cb_dqrr_consume; +} + +static void cb_ern_tdthresh(struct qman_portal *p, struct qman_fq *__fq, + const struct qm_mr_entry *mr) +{ + struct tdthresh_fq *t = (void *)__fq; + t->got_ern = 1; +} + +static void test_tdthresh(void) +{ + struct tdthresh_fq tdfq = { + .fq = { + .cb = { + .dqrr = cb_dqrr_tdthresh, + .ern = cb_ern_tdthresh + } + }, + .got_ern = 0, + .num_dqrr = 0 + }; + struct qman_fq *fq = &tdfq.fq; + struct qm_mcc_initfq opts = { + .we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_TDTHRESH, + .fqd = { + .fq_ctrl = QM_FQCTRL_TDE, + .td = { + .exp = THRESH_EXP, + .mant = THRESH_MANT, + } + } + }; + struct qm_fqd fqd; + u32 flags; + int ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq); + BUG_ON(ret); + /* leave it parked, and set it for local dequeue (loopback) */ + ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, &opts); + BUG_ON(ret); + /* query it back and confirm everything is ok */ + ret = qman_query_fq(fq, &fqd); + BUG_ON(ret); + if (fqd.fq_ctrl != opts.fqd.fq_ctrl) { + pr_err("queried fq_ctrl=%x, should be=%x\n", fqd.fq_ctrl, + opts.fqd.fq_ctrl); + panic("fail"); + } + if (memcmp(&fqd.td, &opts.fqd.td, sizeof(fqd.td))) { + pr_err("queried td_thresh=%x:%x, should be=%x:%x\n", + fqd.td.exp, fqd.td.mant, + opts.fqd.td.exp, opts.fqd.td.mant); + panic("fail"); + } + ret = do_enqueues(fq, td_eq, 3); + BUG_ON(ret); + pr_info(" tdthresh: eq[0..2] complete\n"); + /* enqueues are flushed, so if Qman is going to throw an ERN, the irq + * assertion will already be on its way. 
*/ + msleep(500); + BUG_ON(tdfq.got_ern); + pr_info(" tdthresh: eq <= thresh OK\n"); + ret = do_enqueues(fq, td_eq + 3, 1); + BUG_ON(ret); + pr_info(" tdthresh: eq[3] complete\n"); + /* enqueues are flushed, so if Qman is going to throw an ERN, the irq + * assertion will already be on its way. */ + msleep(500); + BUG_ON(tdfq.got_ern); + pr_info(" tdthresh: eq <= thresh OK\n"); + ret = do_enqueues(fq, td_eq + 4, 1); + BUG_ON(ret); + pr_info(" tdthresh: eq[4] complete\n"); + /* enqueues are flushed, so if Qman is going to throw an ERN, the irq + * assertion will already be on its way. */ + msleep(500); + BUG_ON(!tdfq.got_ern); + pr_info(" tdthresh: eq > thresh OK\n"); + ret = qman_volatile_dequeue(fq, + QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH, + QM_VDQCR_NUMFRAMES_TILLEMPTY); + BUG_ON(ret); + BUG_ON(tdfq.num_dqrr != 4); + ret = qman_retire_fq(fq, &flags); + BUG_ON(ret); + BUG_ON(flags); + ret = qman_oos_fq(fq); + BUG_ON(ret); +} + +/****************************/ +/* "ern code6" test (QMAN9) */ +/****************************/ + +/* Dummy FD to enqueue out-of-sequence and generate an ERN */ +static const struct qm_fd c6_eq = + QM_FD_FMT_29(0, 0xba, 0xdeadbeef, QM_FD_CONTIG_BIG, 1234); + +struct code6_fq { + struct qman_fq fq; + struct qm_mr_entry mr; + struct completion got_ern; +}; + +static void cb_ern_code6(struct qman_portal *p, struct qman_fq *__fq, + const struct qm_mr_entry *mr) +{ + struct code6_fq *c = (void *)__fq; + memcpy(&c->mr, mr, sizeof(*mr)); + complete(&c->got_ern); +} + +static void test_ern_code6(void) +{ + struct code6_fq c6fq = { + .fq = { + .cb = { + .ern = cb_ern_code6 + } + }, + .got_ern = COMPLETION_INITIALIZER(c6fq.got_ern) + }; + struct qman_fq *fq = &c6fq.fq; + struct qm_mcc_initfq opts = { + .we_mask = QM_INITFQ_WE_FQCTRL, + .fqd = { + .fq_ctrl = QM_FQCTRL_ORP + } + }; + u32 flags; + int ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq); + BUG_ON(ret); + /* leave it parked, and set it for local dequeue (loopback) */ + ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, &opts); + BUG_ON(ret); + /* enqueue with ORP using a "too early" sequence number */ + ret = qman_enqueue_orp(fq, &c6_eq, + QMAN_ENQUEUE_FLAG_WAIT | QMAN_ENQUEUE_FLAG_WAIT_SYNC, fq, 5); + BUG_ON(ret); + pr_info(" code6: eq complete\n"); + ret = qman_retire_fq(fq, &flags); + BUG_ON(ret); + pr_info(" code6: retire complete, flags=%08x\n", flags); + BUG_ON(flags != QMAN_FQ_STATE_ORL); + wait_for_completion(&c6fq.got_ern); + pr_info(" code6: ERN, VERB=0x%02x, RC==0x%02x\n", + c6fq.mr.verb, c6fq.mr.ern.rc); + BUG_ON(c6fq.mr.verb & 0x20); + BUG_ON((c6fq.mr.ern.rc & QM_MR_RC_MASK) != QM_MR_RC_ORPWINDOW_RETIRED); + ret = qman_oos_fq(fq); + BUG_ON(ret); +} + +void qman_test_errata(void) +{ + pr_info("Testing Qman errata handling ...\n"); + test_tdthresh(); + test_ern_code6(); + pr_info(" ... SUCCESS!\n"); +} +#else +void qman_test_errata(void) +{ + pr_info("Qman errata-handling test disabled, waiting on model fix\n"); +} +#endif diff --git a/drivers/staging/fsl_qbman/qman_test_high.c b/drivers/staging/fsl_qbman/qman_test_high.c new file mode 100644 index 0000000..cba6c25 --- /dev/null +++ b/drivers/staging/fsl_qbman/qman_test_high.c @@ -0,0 +1,212 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
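For reference, the arithmetic behind the tdthresh test above (a worked note, not driver code): the tail-drop threshold is encoded as mant * 2^exp, and the three large frame lengths were chosen to sum to exactly that value.

	u64 thresh = (u64)THRESH_MANT << THRESH_EXP;
	/* 201 << 21 == 421527552 == 0x19200000, and the first three frame
	 * lengths 79321 + 29923679 + 391524552 also sum to 421527552. */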
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_test.h" + +/*************/ +/* constants */ +/*************/ + +#define CGR_ID 27 +#define POOL_ID 2 +#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID +#define NUM_ENQUEUES 10 +#define NUM_PARTIAL 4 +#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \ + QM_SDQCR_TYPE_PRIO_QOS | \ + QM_SDQCR_TOKEN_SET(0x98) | \ + QM_SDQCR_CHANNELS_DEDICATED | \ + QM_SDQCR_CHANNELS_POOL(POOL_ID)) +#define PORTAL_OPAQUE (void *)0xf00dbeef +#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH) + +/*************************************/ +/* Predeclarations (eg. for fq_base) */ +/*************************************/ + +static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *, + struct qman_fq *, + const struct qm_dqrr_entry *); +static void cb_ern(struct qman_portal *, struct qman_fq *, + const struct qm_mr_entry *); +static void cb_fqs(struct qman_portal *, struct qman_fq *, + const struct qm_mr_entry *); + +/***************/ +/* global vars */ +/***************/ + +static struct qm_fd fd, fd_dq; +static struct qman_fq fq_base = { + .cb.dqrr = cb_dqrr, + .cb.ern = cb_ern, + .cb.fqs = cb_fqs +}; +static DECLARE_WAIT_QUEUE_HEAD(waitqueue); +static int retire_complete, sdqcr_complete; + +/**********************/ +/* internal functions */ +/**********************/ + +/* Helpers for initialising and "incrementing" a frame descriptor */ +static void fd_init(struct qm_fd *__fd) +{ + qm_fd_addr_set64(__fd, 0xabdeadbeefLLU); + __fd->format = qm_fd_contig_big; + __fd->length29 = 0x0000ffff; + __fd->cmd = 0xfeedf00d; +} + +static void fd_inc(struct qm_fd *__fd) +{ + u64 t = qm_fd_addr_get64(__fd); + int z = t >> 40; + t <<= 1; + if (z) + t |= 1; + qm_fd_addr_set64(__fd, t); + __fd->length29--; + __fd->cmd++; +} + +/* The only part of the 'fd' we can't memcmp() is the ppid */ +static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b) +{ + int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 
0 : -1; + if (!r) + r = a->format - b->format; + if (!r) + r = a->opaque - b->opaque; + if (!r) + r = a->cmd - b->cmd; + return r; +} + +/********/ +/* test */ +/********/ + +static void do_enqueues(struct qman_fq *fq) +{ + unsigned int loop; + for (loop = 0; loop < NUM_ENQUEUES; loop++) { + if (qman_enqueue(fq, &fd, QMAN_ENQUEUE_FLAG_WAIT | + (((loop + 1) == NUM_ENQUEUES) ? + QMAN_ENQUEUE_FLAG_WAIT_SYNC : 0))) + panic("qman_enqueue() failed\n"); + fd_inc(&fd); + } +} + +void qman_test_high(void) +{ + int flags, res; + struct qman_fq *fq = &fq_base; + + pr_info("qman_test_high starting\n"); + fd_init(&fd); + fd_init(&fd_dq); + + /* Initialise (parked) FQ */ + if (qman_create_fq(0, FQ_FLAGS, fq)) + panic("qman_create_fq() failed\n"); + if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL)) + panic("qman_init_fq() failed\n"); + + /* Do enqueues + VDQCR, twice. (Parked FQ) */ + do_enqueues(fq); + pr_info("VDQCR (till-empty);\n"); + if (qman_volatile_dequeue(fq, VDQCR_FLAGS, + QM_VDQCR_NUMFRAMES_TILLEMPTY)) + panic("qman_volatile_dequeue() failed\n"); + do_enqueues(fq); + pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES); + if (qman_volatile_dequeue(fq, VDQCR_FLAGS, + QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL))) + panic("qman_volatile_dequeue() failed\n"); + pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL, + NUM_ENQUEUES); + if (qman_volatile_dequeue(fq, VDQCR_FLAGS, + QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL))) + panic("qman_volatile_dequeue() failed\n"); + + do_enqueues(fq); + pr_info("scheduled dequeue (till-empty)\n"); + if (qman_schedule_fq(fq)) + panic("qman_schedule_fq() failed\n"); + wait_event(waitqueue, sdqcr_complete); + + /* Retire and OOS the FQ */ + res = qman_retire_fq(fq, &flags); + if (res < 0) + panic("qman_retire_fq() failed\n"); + wait_event(waitqueue, retire_complete); + if (flags & QMAN_FQ_STATE_BLOCKOOS) + panic("leaking frames\n"); + if (qman_oos_fq(fq)) + panic("qman_oos_fq() failed\n"); + qman_destroy_fq(fq, 0); + pr_info("qman_test_high finished\n"); +} + +static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p, + struct qman_fq *fq, + const struct qm_dqrr_entry *dq) +{ + if (fd_cmp(&fd_dq, &dq->fd)) { + pr_err("BADNESS: dequeued frame doesn't match;\n"); + BUG(); + } + fd_inc(&fd_dq); + if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) { + sdqcr_complete = 1; + wake_up(&waitqueue); + } + return qman_cb_dqrr_consume; +} + +static void cb_ern(struct qman_portal *p, struct qman_fq *fq, + const struct qm_mr_entry *msg) +{ + panic("cb_ern() unimplemented"); +} + +static void cb_fqs(struct qman_portal *p, struct qman_fq *fq, + const struct qm_mr_entry *msg) +{ + u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK); + if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) + panic("unexpected FQS message"); + pr_info("Retirement message received\n"); + retire_complete = 1; + wake_up(&waitqueue); +} diff --git a/drivers/staging/fsl_qbman/qman_test_hotpotato.c b/drivers/staging/fsl_qbman/qman_test_hotpotato.c new file mode 100644 index 0000000..91e01d7 --- /dev/null +++ b/drivers/staging/fsl_qbman/qman_test_hotpotato.c @@ -0,0 +1,497 @@ +/* Copyright 2009-2012 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include "qman_test.h" + +/* Algorithm: + * + * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates + * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The + * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will + * shuttle a "hot potato" frame around them such that every forwarding action + * moves it from one cpu to another. (The use of more than one handler per cpu + * is to allow enough handlers/FQs to truly test the significance of caching - + * ie. when cache-expiries are occuring.) + * + * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the + * first and last words of the frame data will undergo a transformation step on + * each forwarding action. To achieve this, each handler will be assigned a + * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is + * received by a handler, the mixer of the expected sender is XOR'd into all + * words of the entire frame, which is then validated against the original + * values. Then, before forwarding, the entire frame is XOR'd with the mixer of + * the current handler. Apart from validating that the frame is taking the + * expected path, this also provides some quasi-realistic overheads to each + * forwarding action - dereferencing *all* the frame data, computation, and + * conditional branching. There is a "special" handler designated to act as the + * instigator of the test by creating an enqueuing the "hot potato" frame, and + * to determine when the test has completed by counting HP_LOOPS iterations. + * + * Init phases: + * + * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them + * into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU + * handlers and link-list them (but do no other handler setup). + * + * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each + * hp_cpu's 'iterator' to point to its first handler. 
With each loop, + * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler + * and advance the iterator for the next loop. This includes a final fixup, + * which connects the last handler to the first (and which is why phase 2 + * and 3 are separate). + * + * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each + * hp_cpu's 'iterator' to point to its first handler. With each loop, + * initialise FQ objects and advance the iterator for the next loop. + * Moreover, do this initialisation on the cpu it applies to so that Rx FQ + * initialisation targets the correct cpu. + */ + +/* helper to run something on all cpus (can't use on_each_cpu(), as that invokes + * the fn from irq context, which is too restrictive). */ +struct bstrap { + void (*fn)(void); + atomic_t started; +}; +static int bstrap_fn(void *__bstrap) +{ + struct bstrap *bstrap = __bstrap; + atomic_inc(&bstrap->started); + bstrap->fn(); + while (!kthread_should_stop()) + msleep(1); + return 0; +} +static int on_all_cpus(void (*fn)(void)) +{ + int cpu; + for_each_cpu(cpu, cpu_online_mask) { + struct bstrap bstrap = { + .fn = fn, + .started = ATOMIC_INIT(0) + }; + struct task_struct *k = kthread_create(bstrap_fn, &bstrap, + "hotpotato%d", cpu); + int ret; + if (IS_ERR(k)) + return -ENOMEM; + kthread_bind(k, cpu); + wake_up_process(k); + /* If we call kthread_stop() before the "wake up" has had an + * effect, then the thread may exit with -EINTR without ever + * running the function. So poll until it's started before + * requesting it to stop. */ + while (!atomic_read(&bstrap.started)) + msleep(10); + ret = kthread_stop(k); + if (ret) + return ret; + } + return 0; +} + +struct hp_handler { + + /* The following data is stashed when 'rx' is dequeued; */ + /* -------------- */ + /* The Rx FQ, dequeues of which will stash the entire hp_handler */ + struct qman_fq rx; + /* The Tx FQ we should forward to */ + struct qman_fq tx; + /* The value we XOR post-dequeue, prior to validating */ + u32 rx_mixer; + /* The value we XOR pre-enqueue, after validating */ + u32 tx_mixer; + /* what the hotpotato address should be on dequeue */ + dma_addr_t addr; + u32 *frame_ptr; + + /* The following data isn't (necessarily) stashed on dequeue; */ + /* -------------- */ + u32 fqid_rx, fqid_tx; + /* list node for linking us into 'hp_cpu' */ + struct list_head node; + /* Just to check ... */ + unsigned int processor_id; +} ____cacheline_aligned; + +struct hp_cpu { + /* identify the cpu we run on; */ + unsigned int processor_id; + /* root node for the per-cpu list of handlers */ + struct list_head handlers; + /* list node for linking us into 'hp_cpu_list' */ + struct list_head node; + /* when repeatedly scanning 'hp_list', each time linking the n'th + * handlers together, this is used as per-cpu iterator state */ + struct hp_handler *iterator; +}; + +/* Each cpu has one of these */ +static DEFINE_PER_CPU(struct hp_cpu, hp_cpus); + +/* links together the hp_cpu structs, in first-come first-serve order. */ +static LIST_HEAD(hp_cpu_list); +static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock); + +static unsigned int hp_cpu_list_length; + +/* the "special" handler, that starts and terminates the test. */ +static struct hp_handler *special_handler; +static int loop_counter; + +/* handlers are allocated out of this, so they're properly aligned. 
*/ +static struct kmem_cache *hp_handler_slab; + +/* this is the frame data */ +static void *__frame_ptr; +static u32 *frame_ptr; +static dma_addr_t frame_dma; + +/* the main function waits on this */ +static DECLARE_WAIT_QUEUE_HEAD(queue); + +#define HP_PER_CPU 2 +#define HP_LOOPS 8 +/* 80 bytes, like a small ethernet frame, and bleeds into a second cacheline */ +#define HP_NUM_WORDS 80 +/* First word of the LFSR-based frame data */ +#define HP_FIRST_WORD 0xabbaf00d + +static inline u32 do_lfsr(u32 prev) +{ + return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u); +} + +static void allocate_frame_data(void) +{ + u32 lfsr = HP_FIRST_WORD; + int loop; + struct platform_device *pdev = platform_device_alloc("foobar", -1); + if (!pdev) + panic("platform_device_alloc() failed"); + if (platform_device_add(pdev)) + panic("platform_device_add() failed"); + __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL); + if (!__frame_ptr) + panic("kmalloc() failed"); + frame_ptr = (void *)(((unsigned long)__frame_ptr + 63) & + ~(unsigned long)63); + for (loop = 0; loop < HP_NUM_WORDS; loop++) { + frame_ptr[loop] = lfsr; + lfsr = do_lfsr(lfsr); + } + frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS, + DMA_BIDIRECTIONAL); + platform_device_del(pdev); + platform_device_put(pdev); +} + +static void deallocate_frame_data(void) +{ + kfree(__frame_ptr); +} + +static inline void process_frame_data(struct hp_handler *handler, + const struct qm_fd *fd) +{ + u32 *p = handler->frame_ptr; + u32 lfsr = HP_FIRST_WORD; + int loop; + if (qm_fd_addr_get64(fd) != handler->addr) + panic("bad frame address"); + for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { + *p ^= handler->rx_mixer; + if (*p != lfsr) + panic("corrupt frame data"); + *p ^= handler->tx_mixer; + lfsr = do_lfsr(lfsr); + } +} + +static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_dqrr_entry *dqrr) +{ + struct hp_handler *handler = (struct hp_handler *)fq; + + process_frame_data(handler, &dqrr->fd); + if (qman_enqueue(&handler->tx, &dqrr->fd, 0)) + panic("qman_enqueue() failed"); + return qman_cb_dqrr_consume; +} + +static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_dqrr_entry *dqrr) +{ + struct hp_handler *handler = (struct hp_handler *)fq; + + process_frame_data(handler, &dqrr->fd); + if (++loop_counter < HP_LOOPS) { + if (qman_enqueue(&handler->tx, &dqrr->fd, 0)) + panic("qman_enqueue() failed"); + } else { + pr_info("Received final (%dth) frame\n", loop_counter); + wake_up(&queue); + } + return qman_cb_dqrr_consume; +} + +static void create_per_cpu_handlers(void) +{ + struct hp_handler *handler; + int loop; + struct hp_cpu *hp_cpu = &__get_cpu_var(hp_cpus); + + hp_cpu->processor_id = smp_processor_id(); + spin_lock(&hp_lock); + list_add_tail(&hp_cpu->node, &hp_cpu_list); + hp_cpu_list_length++; + spin_unlock(&hp_lock); + INIT_LIST_HEAD(&hp_cpu->handlers); + for (loop = 0; loop < HP_PER_CPU; loop++) { + handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL); + if (!handler) + panic("kmem_cache_alloc() failed"); + handler->processor_id = hp_cpu->processor_id; + handler->addr = frame_dma; + handler->frame_ptr = frame_ptr; + list_add_tail(&handler->node, &hp_cpu->handlers); + } +} + +static void destroy_per_cpu_handlers(void) +{ + struct list_head *loop, *tmp; + struct hp_cpu *hp_cpu = &__get_cpu_var(hp_cpus); + + spin_lock(&hp_lock); + list_del(&hp_cpu->node); + spin_unlock(&hp_lock); + list_for_each_safe(loop, tmp, 
&hp_cpu->handlers) { + u32 flags; + struct hp_handler *handler = list_entry(loop, struct hp_handler, + node); + if (qman_retire_fq(&handler->rx, &flags)) + panic("qman_retire_fq(rx) failed"); + BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS); + if (qman_oos_fq(&handler->rx)) + panic("qman_oos_fq(rx) failed"); + qman_destroy_fq(&handler->rx, 0); + qman_destroy_fq(&handler->tx, 0); + qman_release_fqid(handler->fqid_rx); + list_del(&handler->node); + kmem_cache_free(hp_handler_slab, handler); + } +} + +static inline u8 num_cachelines(u32 offset) +{ + u8 res = (offset + (L1_CACHE_BYTES - 1)) + / (L1_CACHE_BYTES); + if (res > 3) + return 3; + return res; +} +#define STASH_DATA_CL \ + num_cachelines(HP_NUM_WORDS * 4) +#define STASH_CTX_CL \ + num_cachelines(offsetof(struct hp_handler,fqid_rx)) + +static void init_handler(void *__handler) +{ + struct qm_mcc_initfq opts; + struct hp_handler *handler = __handler; + BUG_ON(handler->processor_id != smp_processor_id()); + /* Set up rx */ + memset(&handler->rx, 0, sizeof(handler->rx)); + if (handler == special_handler) + handler->rx.cb.dqrr = special_dqrr; + else + handler->rx.cb.dqrr = normal_dqrr; + if (qman_create_fq(handler->fqid_rx, 0, &handler->rx)) + panic("qman_create_fq(rx) failed"); + memset(&opts, 0, sizeof(opts)); + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; + opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING; + opts.fqd.context_a.stashing.data_cl = STASH_DATA_CL; + opts.fqd.context_a.stashing.context_cl = STASH_CTX_CL; + if (qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED | + QMAN_INITFQ_FLAG_LOCAL, &opts)) + panic("qman_init_fq(rx) failed"); + /* Set up tx */ + memset(&handler->tx, 0, sizeof(handler->tx)); + if (qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY, + &handler->tx)) + panic("qman_create_fq(tx) failed"); +} + +static void init_phase2(void) +{ + int loop; + u32 fqid = 0; + u32 lfsr = 0xdeadbeef; + struct hp_cpu *hp_cpu; + struct hp_handler *handler; + + for (loop = 0; loop < HP_PER_CPU; loop++) { + list_for_each_entry(hp_cpu, &hp_cpu_list, node) { + int ret; + if (!loop) + hp_cpu->iterator = list_first_entry( + &hp_cpu->handlers, + struct hp_handler, node); + else + hp_cpu->iterator = list_entry( + hp_cpu->iterator->node.next, + struct hp_handler, node); + /* Rx FQID is the previous handler's Tx FQID */ + hp_cpu->iterator->fqid_rx = fqid; + /* Allocate new FQID for Tx */ + ret = qman_alloc_fqid(&fqid); + if (ret) + panic("qman_alloc_fqid() failed"); + hp_cpu->iterator->fqid_tx = fqid; + /* Rx mixer is the previous handler's Tx mixer */ + hp_cpu->iterator->rx_mixer = lfsr; + /* Get new mixer for Tx */ + lfsr = do_lfsr(lfsr); + hp_cpu->iterator->tx_mixer = lfsr; + } + } + /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */ + hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node); + handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node); + BUG_ON((handler->fqid_rx != 0) || (handler->rx_mixer != 0xdeadbeef)); + handler->fqid_rx = fqid; + handler->rx_mixer = lfsr; + /* and tag it as our "special" handler */ + special_handler = handler; +} + +static void init_phase3(void) +{ + int loop; + struct hp_cpu *hp_cpu; + + for (loop = 0; loop < HP_PER_CPU; loop++) { + list_for_each_entry(hp_cpu, &hp_cpu_list, node) { + if (!loop) + hp_cpu->iterator = list_first_entry( + &hp_cpu->handlers, + struct hp_handler, node); + else + hp_cpu->iterator = list_entry( + hp_cpu->iterator->node.next, + struct hp_handler, node); + preempt_disable(); + if (hp_cpu->processor_id == smp_processor_id()) + 
init_handler(hp_cpu->iterator); + else + smp_call_function_single(hp_cpu->processor_id, + init_handler, hp_cpu->iterator, 1); + preempt_enable(); + } + } +} + +static void send_first_frame(void *ignore) +{ + u32 *p = special_handler->frame_ptr; + u32 lfsr = HP_FIRST_WORD; + int loop; + struct qm_fd fd; + + BUG_ON(special_handler->processor_id != smp_processor_id()); + memset(&fd, 0, sizeof(fd)); + qm_fd_addr_set64(&fd, special_handler->addr); + fd.format = qm_fd_contig_big; + fd.length29 = HP_NUM_WORDS * 4; + for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { + if (*p != lfsr) + panic("corrupt frame data"); + *p ^= special_handler->tx_mixer; + lfsr = do_lfsr(lfsr); + } + pr_info("Sending first frame\n"); + if (qman_enqueue(&special_handler->tx, &fd, 0)) + panic("qman_enqueue() failed"); +} + +void qman_test_hotpotato(void) +{ + if (cpumask_weight(cpu_online_mask) < 2) { + pr_info("qman_test_hotpotato, skip - only 1 CPU\n"); + return; + } + + pr_info("qman_test_hotpotato starting\n"); + + hp_cpu_list_length = 0; + loop_counter = 0; + hp_handler_slab = kmem_cache_create("hp_handler_slab", + sizeof(struct hp_handler), L1_CACHE_BYTES, + SLAB_HWCACHE_ALIGN, NULL); + if (!hp_handler_slab) + panic("kmem_cache_create() failed"); + + allocate_frame_data(); + + /* Init phase 1 */ + pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU); + if (on_all_cpus(create_per_cpu_handlers)) + panic("on_each_cpu() failed"); + pr_info("Number of cpus: %d, total of %d handlers\n", + hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU); + + init_phase2(); + + init_phase3(); + + preempt_disable(); + if (special_handler->processor_id == smp_processor_id()) + send_first_frame(NULL); + else + smp_call_function_single(special_handler->processor_id, + send_first_frame, NULL, 1); + preempt_enable(); + + wait_event(queue, loop_counter == HP_LOOPS); + deallocate_frame_data(); + if (on_all_cpus(destroy_per_cpu_handlers)) + panic("on_each_cpu() failed"); + kmem_cache_destroy(hp_handler_slab); + pr_info("qman_test_hotpotato finished\n"); +} diff --git a/drivers/staging/fsl_qbman/qman_utility.c b/drivers/staging/fsl_qbman/qman_utility.c new file mode 100644 index 0000000..fa2f40b --- /dev/null +++ b/drivers/staging/fsl_qbman/qman_utility.c @@ -0,0 +1,130 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
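To make the topology of the test above concrete (an explanatory note, not driver code): init_phase2() chains the handlers into a single ring by making each handler's Rx FQID the previous handler's Tx FQID, so with, say, two online cpus and HP_PER_CPU==2 one frame visits every handler, changing cpu on every hop:

	cpu0/h0 -> cpu1/h0 -> cpu0/h1 -> cpu1/h1 -> back to cpu0/h0

The first handler of the first cpu is the "special" one: it injects the frame via send_first_frame() and counts HP_LOOPS laps in special_dqrr() before waking the main thread.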
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_private.h" + +/* ----------------- */ +/* --- FQID Pool --- */ + +struct qman_fqid_pool { + /* Base and size of the FQID range */ + u32 fqid_base; + u32 total; + /* Number of FQIDs currently "allocated" */ + u32 used; + /* Allocation optimisation. When 'usedfqid_base = fqid_start; + pool->total = num; + pool->used = 0; + pool->next = 0; + pool->bits = kmalloc(QNUM_BYTES(num), GFP_KERNEL); + if (!pool->bits) { + kfree(pool); + return NULL; + } + memset(pool->bits, 0, QNUM_BYTES(num)); + /* If num is not an even multiple of QLONG_BITS (or even 8, for + * byte-oriented searching) then we fill the trailing bits with 1, to + * make them look allocated (permanently). */ + for (i = num + 1; i < QNUM_BITS(num); i++) + set_bit(i, pool->bits); + return pool; +} +EXPORT_SYMBOL(qman_fqid_pool_create); + +int qman_fqid_pool_destroy(struct qman_fqid_pool *pool) +{ + int ret = pool->used; + kfree(pool->bits); + kfree(pool); + return ret; +} +EXPORT_SYMBOL(qman_fqid_pool_destroy); + +int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid) +{ + int ret; + if (pool->used == pool->total) + return -ENOMEM; + *fqid = pool->fqid_base + pool->next; + ret = test_and_set_bit(pool->next, pool->bits); + BUG_ON(ret); + if (++pool->used == pool->total) + return 0; + pool->next = find_next_zero_bit(pool->bits, pool->total, pool->next); + if (pool->next >= pool->total) + pool->next = find_first_zero_bit(pool->bits, pool->total); + BUG_ON(pool->next >= pool->total); + return 0; +} +EXPORT_SYMBOL(qman_fqid_pool_alloc); + +void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid) +{ + int ret; + + fqid -= pool->fqid_base; + ret = test_and_clear_bit(fqid, pool->bits); + BUG_ON(!ret); + if (pool->used-- == pool->total) + pool->next = fqid; +} +EXPORT_SYMBOL(qman_fqid_pool_free); + +u32 qman_fqid_pool_used(struct qman_fqid_pool *pool) +{ + return pool->used; +} +EXPORT_SYMBOL(qman_fqid_pool_used); diff --git a/include/linux/fsl_bman.h b/include/linux/fsl_bman.h new file mode 100644 index 0000000..072f326 --- /dev/null +++ b/include/linux/fsl_bman.h @@ -0,0 +1,510 @@ +/* Copyright 2008-2012 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
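An illustrative usage sketch (not from the driver) of the FQID pool above; the creation routine appears to take a base FQID and a count, and the alloc/free/destroy calls behave as shown in qman_utility.c.

static int example_fqid_pool(void)
{
	struct qman_fqid_pool *pool;
	u32 fqid;
	int err;

	pool = qman_fqid_pool_create(0x100, 256);	/* manage FQIDs 0x100..0x1ff */
	if (!pool)
		return -ENOMEM;
	err = qman_fqid_pool_alloc(pool, &fqid);	/* first call yields 0x100 */
	if (!err)
		qman_fqid_pool_free(pool, fqid);
	/* returns how many FQIDs were still allocated (0 here) */
	return qman_fqid_pool_destroy(pool);
}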
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FSL_BMAN_H +#define FSL_BMAN_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Last updated for v00.79 of the BG */ + +/* Portal processing (interrupt) sources */ +#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */ +#define BM_PIRQ_BSCN 0x00000001 /* Buffer depletion State Change */ + +/* This wrapper represents a bit-array for the depletion state of the 64 Bman + * buffer pools. */ +struct bman_depletion { + u32 __state[2]; +}; +#define BMAN_DEPLETION_EMPTY { { 0x00000000, 0x00000000 } } +#define BMAN_DEPLETION_FULL { { 0xffffffff, 0xffffffff } } +#define __bmdep_word(x) ((x) >> 5) +#define __bmdep_shift(x) ((x) & 0x1f) +#define __bmdep_bit(x) (0x80000000 >> __bmdep_shift(x)) +static inline void bman_depletion_init(struct bman_depletion *c) +{ + c->__state[0] = c->__state[1] = 0; +} +static inline void bman_depletion_fill(struct bman_depletion *c) +{ + c->__state[0] = c->__state[1] = ~0; +} +static inline int bman_depletion_get(const struct bman_depletion *c, u8 bpid) +{ + return c->__state[__bmdep_word(bpid)] & __bmdep_bit(bpid); +} +static inline void bman_depletion_set(struct bman_depletion *c, u8 bpid) +{ + c->__state[__bmdep_word(bpid)] |= __bmdep_bit(bpid); +} +static inline void bman_depletion_unset(struct bman_depletion *c, u8 bpid) +{ + c->__state[__bmdep_word(bpid)] &= ~__bmdep_bit(bpid); +} + +/* ------------------------------------------------------- */ +/* --- Bman data structures (and associated constants) --- */ + +/* Represents s/w corenet portal mapped data structures */ +struct bm_rcr_entry; /* RCR (Release Command Ring) entries */ +struct bm_mc_command; /* MC (Management Command) command */ +struct bm_mc_result; /* MC result */ + +/* Code-reduction, define a wrapper for 48-bit buffers. In cases where a buffer + * pool id specific to this buffer is needed (BM_RCR_VERB_CMD_BPID_MULTI, + * BM_MCC_VERB_ACQUIRE), the 'bpid' field is used. 
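An illustrative sketch (not from the driver) of building a bman_depletion bit-array with the helpers above, e.g. to select the buffer pools a portal should track (cf. the 'mask' member of struct bman_portal_config further down).

	struct bman_depletion mask;

	bman_depletion_init(&mask);
	bman_depletion_set(&mask, 7);
	bman_depletion_set(&mask, 63);
	/* bman_depletion_get(&mask, 7) and (&mask, 63) are now non-zero */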
*/ +struct bm_buffer { + union { + struct { + u8 __reserved1; + u8 bpid; + u16 hi; /* High 16-bits of 48-bit address */ + u32 lo; /* Low 32-bits of 48-bit address */ + }; + struct { + u64 __notaddress:16; + u64 addr:48; + }; + }; +} __attribute__((aligned(8))); +static inline u64 bm_buffer_get64(const struct bm_buffer *buf) +{ + return buf->addr; +} +static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf) +{ + return (dma_addr_t)buf->addr; +} +/* Macro, so we compile better if 'v' isn't always 64-bit */ +/* Note: this first version is causing a noticable performance degradation, + * which needs analysis, so leaving it commented out for now. The second version + * achieves optimal performance. */ +#if 0 +#define bm_buffer_set64(buf, v) \ + do { \ + struct bm_buffer *__buf931 = (buf); \ + __buf931->addr = v; \ + } while (0) +#else +#define bm_buffer_set64(buf, v) \ + do { \ + struct bm_buffer *__buf931 = (buf); \ + __buf931->hi = upper_32_bits(v); \ + __buf931->lo = lower_32_bits(v); \ + } while (0) +#endif + +/* See 1.5.3.5.4: "Release Command" */ +struct bm_rcr_entry { + union { + struct { + u8 __dont_write_directly__verb; + u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */ + u8 __reserved1[62]; + }; + struct bm_buffer bufs[8]; + }; +} __packed; +#define BM_RCR_VERB_VBIT 0x80 +#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */ +#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20 +#define BM_RCR_VERB_CMD_BPID_MULTI 0x30 +#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */ + +/* See 1.5.3.1: "Acquire Command" */ +/* See 1.5.3.2: "Query Command" */ +struct bm_mcc_acquire { + u8 bpid; + u8 __reserved1[62]; +} __packed; +struct bm_mcc_query { + u8 __reserved2[63]; +} __packed; +struct bm_mc_command { + u8 __dont_write_directly__verb; + union { + struct bm_mcc_acquire acquire; + struct bm_mcc_query query; + }; +} __packed; +#define BM_MCC_VERB_VBIT 0x80 +#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */ +#define BM_MCC_VERB_CMD_ACQUIRE 0x10 +#define BM_MCC_VERB_CMD_QUERY 0x40 +#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */ + +/* See 1.5.3.3: "Acquire Reponse" */ +/* See 1.5.3.4: "Query Reponse" */ +struct bm_pool_state { + u8 __reserved1[32]; + /* "availability state" and "depletion state" */ + struct { + u8 __reserved1[8]; + /* Access using bman_depletion_***() */ + struct bman_depletion state; + } as, ds; +}; +struct bm_mc_result { + union { + struct { + u8 verb; + u8 __reserved1[63]; + }; + union { + struct { + u8 __reserved1; + u8 bpid; + u8 __reserved2[62]; + }; + struct bm_buffer bufs[8]; + } acquire; + struct bm_pool_state query; + }; +} __packed; +#define BM_MCR_VERB_VBIT 0x80 +#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK +#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE +#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY +#define BM_MCR_VERB_CMD_ERR_INVALID 0x60 +#define BM_MCR_VERB_CMD_ERR_ECC 0x70 +#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */ +/* Determine the "availability state" of pool 'p' from a query result 'r' */ +#define BM_MCR_QUERY_AVAILABILITY(r,p) bman_depletion_get(&r->query.as.state,p) +/* Determine the "depletion state" of pool 'p' from a query result 'r' */ +#define BM_MCR_QUERY_DEPLETION(r,p) bman_depletion_get(&r->query.ds.state,p) + +/*******************************************************************/ +/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */ +/*******************************************************************/ + + /* Portal and Buffer Pools 
*/ + /* ----------------------- */ +/* Represents a managed portal */ +struct bman_portal; + +/* This object type represents Bman buffer pools. */ +struct bman_pool; + +struct bman_portal_config { + /* This is used for any "core-affine" portals, ie. default portals + * associated to the corresponding cpu. -1 implies that there is no core + * affinity configured. */ + int cpu; + /* portal interrupt line */ + int irq; + /* the unique index of this portal */ + u32 index; + /* Is this portal shared? (If so, it has coarser locking and demuxes + * processing on behalf of other CPUs.) */ + int is_shared; + /* These are the buffer pool IDs that may be used via this portal. */ + struct bman_depletion mask; +}; + +/* This callback type is used when handling pool depletion entry/exit. The + * 'cb_ctx' value is the opaque value associated with the pool object in + * bman_new_pool(). 'depleted' is non-zero on depletion-entry, and zero on + * depletion-exit. */ +typedef void (*bman_cb_depletion)(struct bman_portal *bm, + struct bman_pool *pool, void *cb_ctx, int depleted); + +/* This struct specifies parameters for a bman_pool object. */ +struct bman_pool_params { + /* index of the buffer pool to encapsulate (0-63), ignored if + * BMAN_POOL_FLAG_DYNAMIC_BPID is set. */ + u32 bpid; + /* bit-mask of BMAN_POOL_FLAG_*** options */ + u32 flags; + /* depletion-entry/exit callback, if BMAN_POOL_FLAG_DEPLETION is set */ + bman_cb_depletion cb; + /* opaque user value passed as a parameter to 'cb' */ + void *cb_ctx; + /* depletion-entry/exit thresholds, if BMAN_POOL_FLAG_THRESH is set. NB: + * this is only allowed if BMAN_POOL_FLAG_DYNAMIC_BPID is used *and* + * when run in the control plane (which controls Bman CCSR). This array + * matches the definition of bm_pool_set(). */ + u32 thresholds[4]; +}; + +/* Flags to bman_new_pool() */ +#define BMAN_POOL_FLAG_NO_RELEASE 0x00000001 /* can't release to pool */ +#define BMAN_POOL_FLAG_ONLY_RELEASE 0x00000002 /* can only release to pool */ +#define BMAN_POOL_FLAG_DEPLETION 0x00000004 /* track depletion entry/exit */ +#define BMAN_POOL_FLAG_DYNAMIC_BPID 0x00000008 /* (de)allocate bpid */ +#define BMAN_POOL_FLAG_THRESH 0x00000010 /* set depletion thresholds */ +#define BMAN_POOL_FLAG_STOCKPILE 0x00000020 /* stockpile to reduce hw ops */ + +/* Flags to bman_release() */ +#ifdef CONFIG_FSL_DPA_CAN_WAIT +#define BMAN_RELEASE_FLAG_WAIT 0x00000001 /* wait if RCR is full */ +#define BMAN_RELEASE_FLAG_WAIT_INT 0x00000002 /* if we wait, interruptible? */ +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC +#define BMAN_RELEASE_FLAG_WAIT_SYNC 0x00000004 /* if wait, until consumed? */ +#endif +#endif +#define BMAN_RELEASE_FLAG_NOW 0x00000008 /* issue immediate release */ + +/* Flags to bman_acquire() */ +#define BMAN_ACQUIRE_FLAG_STOCKPILE 0x00000001 /* no hw op, stockpile only */ + + /* Portal Management */ + /* ----------------- */ +/** + * bman_get_portal_config - get portal configuration settings + * + * This returns a read-only view of the current cpu's affine portal settings. + */ +const struct bman_portal_config *bman_get_portal_config(void); + +/** + * bman_irqsource_get - return the portal work that is interrupt-driven + * + * Returns a bitmask of BM_PIRQ_**I processing sources that are currently + * enabled for interrupt handling on the current cpu's affine portal. These + * sources will trigger the portal interrupt and the interrupt handler (or a + * tasklet/bottom-half it defers to) will perform the corresponding processing + * work. 
The bman_poll_***() functions will only process sources that are not in + * this bitmask. If the current CPU is sharing a portal hosted on another CPU, + * this always returns zero. + */ +u32 bman_irqsource_get(void); + +/** + * bman_irqsource_add - add processing sources to be interrupt-driven + * @bits: bitmask of BM_PIRQ_**I processing sources + * + * Adds processing sources that should be interrupt-driven (rather than + * processed via bman_poll_***() functions). Returns zero for success, or + * -EINVAL if the current CPU is sharing a portal hosted on another CPU. */ +int bman_irqsource_add(u32 bits); + +/** + * bman_irqsource_remove - remove processing sources from being interrupt-driven + * @bits: bitmask of BM_PIRQ_**I processing sources + * + * Removes processing sources from being interrupt-driven, so that they will + * instead be processed via bman_poll_***() functions. Returns zero for success, + * or -EINVAL if the current CPU is sharing a portal hosted on another CPU. */ +int bman_irqsource_remove(u32 bits); + +/** + * bman_affine_cpus - return a mask of cpus that have affine portals + */ +const cpumask_t *bman_affine_cpus(void); + +/** + * bman_poll_slow - process anything that isn't interrupt-driven. + * + * This function does any portal processing that isn't interrupt-driven. If the + * current CPU is sharing a portal hosted on another CPU, this function will + * return -EINVAL, otherwise the return value is a bitmask of BM_PIRQ_* sources + * indicating what interrupt sources were actually processed by the call. + * + * NB, unlike the legacy wrapper bman_poll(), this function will + * deterministically check for the presence of portal processing work and do it, + * which implies some latency even if there's nothing to do. The bman_poll() + * wrapper on the other hand (like the qman_poll() wrapper) attenuates this by + * checking for (and doing) portal processing infrequently, ie. such that + * qman_poll() and bman_poll() can be called from core-processing loops. Use + * bman_poll_slow() when you yourself are deciding when to incur the overhead of + * processing. + */ +u32 bman_poll_slow(void); + +/** + * bman_poll - process anything that isn't interrupt-driven. + * + * Dispatcher logic on a cpu can use this to trigger any maintenance of the + * affine portal. This function does whatever processing is not triggered by + * interrupts. This is a legacy wrapper that can be used in core-processing + * loops but mitigates the performance overhead of portal processing by + * adaptively bypassing true portal processing most of the time. (Processing is + * done once every 10 calls if the previous processing revealed that work needed + * to be done, or once every 1000 calls if the previous processing revealed no + * work needed doing.) If you wish to control this yourself, call + * bman_poll_slow() instead, which always checks for portal processing work. + */ +void bman_poll(void); + +/** + * bman_rcr_is_empty - Determine if portal's RCR is empty + * + * For use in situations where a cpu-affine caller needs to determine when all + * releases for the local portal have been processed by Bman but can't use the + * BMAN_RELEASE_FLAG_WAIT_SYNC flag to do this from the final bman_release(). + * The function forces tracking of RCR consumption (which normally doesn't + * happen until release processing needs to find space to put new release + * commands), and returns zero if the ring still has unprocessed entries, + * non-zero if it is empty. 
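+ *
+ * A minimal usage sketch, assuming releases have already been issued from
+ * this cpu's affine portal:
+ *
+ *     while (!bman_rcr_is_empty())
+ *         cpu_relax();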
+ */ +int bman_rcr_is_empty(void); + +/** + * bman_alloc_bpid_range - Allocate a contiguous range of BPIDs + * @result: is set by the API to the base BPID of the allocated range + * @count: the number of BPIDs required + * @align: required alignment of the allocated range + * @partial: non-zero if the API can return fewer than @count BPIDs + * + * Returns the number of buffer pools allocated, or a negative error code. If + * @partial is non zero, the allocation request may return a smaller range of + * BPs than requested (though alignment will be as requested). If @partial is + * zero, the return value will either be 'count' or negative. + */ +int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial); +static inline int bman_alloc_bpid(u32 *result) +{ + int ret = bman_alloc_bpid_range(result, 1, 0, 0); + return (ret > 0) ? 0 : ret; +} + +/** + * bman_release_bpid_range - Release the specified range of buffer pool IDs + * @bpid: the base BPID of the range to deallocate + * @count: the number of BPIDs in the range + * + * This function can also be used to seed the allocator with ranges of BPIDs + * that it can subsequently allocate from. + */ +void bman_release_bpid_range(u32 bpid, unsigned int count); +static inline void bman_release_bpid(u32 bpid) +{ + bman_release_bpid_range(bpid, 1); +} + + + /* Pool management */ + /* --------------- */ +/** + * bman_new_pool - Allocates a Buffer Pool object + * @params: parameters specifying the buffer pool ID and behaviour + * + * Creates a pool object for the given @params. A portal and the depletion + * callback field of @params are only used if the BMAN_POOL_FLAG_DEPLETION flag + * is set. NB, the fields from @params are copied into the new pool object, so + * the structure provided by the caller can be released or reused after the + * function returns. + */ +struct bman_pool *bman_new_pool(const struct bman_pool_params *params); + +/** + * bman_free_pool - Deallocates a Buffer Pool object + * @pool: the pool object to release + * + */ +void bman_free_pool(struct bman_pool *pool); + +/** + * bman_get_params - Returns a pool object's parameters. + * @pool: the pool object + * + * The returned pointer refers to state within the pool object so must not be + * modified and can no longer be read once the pool object is destroyed. + */ +const struct bman_pool_params *bman_get_params(const struct bman_pool *pool); + +/** + * bman_release - Release buffer(s) to the buffer pool + * @pool: the buffer pool object to release to + * @bufs: an array of buffers to release + * @num: the number of buffers in @bufs (1-8) + * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options + * + * Adds the given buffers to RCR entries. If the portal @p was created with the + * "COMPACT" flag, then it will be using a compaction algorithm to improve + * utilisation of RCR. As such, these buffers may join an existing ring entry + * and/or it may not be issued right away so as to allow future releases to join + * the same ring entry. Use the BMAN_RELEASE_FLAG_NOW flag to override this + * behaviour by committing the RCR entry (or entries) right away. If the RCR + * ring is full, the function will return -EBUSY unless BMAN_RELEASE_FLAG_WAIT + * is selected, in which case it will sleep waiting for space to become + * available in RCR. If the function receives a signal before such time (and + * BMAN_RELEASE_FLAG_WAIT_INT is set), the function returns -EINTR. Otherwise, + * it returns zero. 
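+ *
+ * A minimal sketch of a single-buffer release, assuming 'pool' was created
+ * with bman_new_pool() and 'phys' is the physical address of a buffer
+ * belonging to that pool:
+ *
+ *     struct bm_buffer buf;
+ *     int ret;
+ *
+ *     bm_buffer_set64(&buf, phys);
+ *     ret = bman_release(pool, &buf, 1, 0);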
*/ +int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num, + u32 flags); + +/** + * bman_acquire - Acquire buffer(s) from a buffer pool + * @pool: the buffer pool object to acquire from + * @bufs: array for storing the acquired buffers + * @num: the number of buffers desired (@bufs is at least this big) + * @flags: bit-mask of BMAN_ACQUIRE_FLAG_*** options + * + * Issues an "Acquire" command via the portal's management command interface. + * The return value will be the number of buffers obtained from the pool, or a + * negative error code if a h/w error or pool starvation was encountered. + */ +int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num, + u32 flags); + +/** + * bman_flush_stockpile - Flush stockpile buffer(s) to the buffer pool + * @pool: the buffer pool object the stockpile belongs to + * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options + * + * Adds stockpile buffers to RCR entries until the stockpile is empty. + * The return value will be a negative error code if a h/w error occurred. + * If the BMAN_RELEASE_FLAG_NOW flag is passed and the RCR ring is full, + * -EAGAIN will be returned. + */ +int bman_flush_stockpile(struct bman_pool *pool, u32 flags); + +/** + * bman_query_pools - Query all buffer pool states + * @state: storage for the queried availability and depletion states + */ +int bman_query_pools(struct bm_pool_state *state); + +#ifdef CONFIG_FSL_BMAN_CONFIG +/** + * bman_query_free_buffers - Query how many free buffers are in buffer pool + * @pool: the buffer pool object to query + * + * Returns the number of free buffers in the pool + */ +u32 bman_query_free_buffers(struct bman_pool *pool); + +/** + * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds + * @pool: the buffer pool object to which the thresholds will be set + * @thresholds: the new thresholds + */ +int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds); +#endif +#ifdef __cplusplus +} +#endif + +#endif /* FSL_BMAN_H */ diff --git a/include/linux/fsl_qman.h b/include/linux/fsl_qman.h new file mode 100644 index 0000000..68a8768 --- /dev/null +++ b/include/linux/fsl_qman.h @@ -0,0 +1,3213 @@ +/* Copyright 2008-2012 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FSL_QMAN_H +#define FSL_QMAN_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Last updated for v00.800 of the BG */ + +/* Hardware constants */ +#define QM_CHANNEL_SWPORTAL0 0 +#define QMAN_CHANNEL_POOL1 0x21 +#define QMAN_CHANNEL_CAAM 0x80 +#define QMAN_CHANNEL_PME 0xa0 +#define QMAN_CHANNEL_POOL1_REV3 0x401 +#define QMAN_CHANNEL_CAAM_REV3 0x840 +#define QMAN_CHANNEL_PME_REV3 0x860 +extern u16 qm_channel_pool1; +extern u16 qm_channel_caam; +extern u16 qm_channel_pme; +enum qm_dc_portal { + qm_dc_portal_fman0 = 0, + qm_dc_portal_fman1 = 1, + qm_dc_portal_caam = 2, + qm_dc_portal_pme = 3 +}; + +/* Portal processing (interrupt) sources */ +#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */ +#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */ +#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */ +#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */ +#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */ +/* This mask contains all the interrupt sources that need handling except DQRI, + * ie. that if present should trigger slow-path processing. */ +#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \ + QM_PIRQ_MRI) + +/* --- Clock speed --- */ +/* A qman driver instance may or may not know the current qman clock speed. + * However, certain CEETM calculations may not be possible if this is not known. + * The 'set' function will only succeed (return zero) if the driver did not + * already know the clock speed. Likewise, the 'get' function will only succeed + * if the driver does know the clock speed (either because it knew when booting, + * or was told via 'set'). In cases where software is running on a driver + * instance that does not know the clock speed (eg. on a hypervised data-plane), + * and the user can obtain the current qman clock speed by other means (eg. from + * a message sent from the control-plane), then the 'set' function can be used + * to enable rate-calculations in a driver where it would otherwise not be + * possible. */ +int qm_get_clock(u64 *clock_hz); +int qm_set_clock(u64 clock_hz); + +/* For qman_static_dequeue_*** APIs */ +#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff +/* for n in [1,15] */ +#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n)) +/* for conversion from n of qm_channel */ +static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel) +{ + return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1); +} + +/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use + * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use + * FQID(n) to fill in the frame queue ID. 
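+ *
+ * For example, a volatile dequeue of at most 3 frames could use a VDQCR value
+ * such as:
+ *     QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_EXACT | QM_VDQCR_NUMFRAMES_SET(3)
+ * together with the frame queue ID filled in via FQID(n).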
*/ +#define QM_VDQCR_PRECEDENCE_VDQCR 0x0 +#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000 +#define QM_VDQCR_EXACT 0x40000000 +#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000 +#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24) +#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f) +#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0) + + +/* ------------------------------------------------------- */ +/* --- Qman data structures (and associated constants) --- */ + +/* Represents s/w corenet portal mapped data structures */ +struct qm_eqcr_entry; /* EQCR (EnQueue Command Ring) entries */ +struct qm_dqrr_entry; /* DQRR (DeQueue Response Ring) entries */ +struct qm_mr_entry; /* MR (Message Ring) entries */ +struct qm_mc_command; /* MC (Management Command) command */ +struct qm_mc_result; /* MC result */ + +/* See David Lapp's "Frame formats" document, "dpateam", Jan 07, 2008 */ +#define QM_FD_FORMAT_SG 0x4 +#define QM_FD_FORMAT_LONG 0x2 +#define QM_FD_FORMAT_COMPOUND 0x1 +enum qm_fd_format { + /* 'contig' implies a contiguous buffer, whereas 'sg' implies a + * scatter-gather table. 'big' implies a 29-bit length with no offset + * field, otherwise length is 20-bit and offset is 9-bit. 'compound' + * implies a s/g-like table, where each entry itself represents a frame + * (contiguous or scatter-gather) and the 29-bit "length" is + * interpreted purely for congestion calculations, ie. a "congestion + * weight". */ + qm_fd_contig = 0, + qm_fd_contig_big = QM_FD_FORMAT_LONG, + qm_fd_sg = QM_FD_FORMAT_SG, + qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG, + qm_fd_compound = QM_FD_FORMAT_COMPOUND +}; + +/* Capitalised versions are un-typed but can be used in static expressions */ +#define QM_FD_CONTIG 0 +#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG +#define QM_FD_SG QM_FD_FORMAT_SG +#define QM_FD_SG_BIG (QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG) +#define QM_FD_COMPOUND QM_FD_FORMAT_COMPOUND + +/* See 1.5.1.1: "Frame Descriptor (FD)" */ +struct qm_fd { + union { + struct { + u8 dd:2; /* dynamic debug */ + u8 liodn_offset:6; + u8 bpid:8; /* Buffer Pool ID */ + u8 eliodn_offset:4; + u8 __reserved:4; + u8 addr_hi; /* high 8-bits of 40-bit address */ + u32 addr_lo; /* low 32-bits of 40-bit address */ + }; + struct { + u64 __notaddress:24; + /* More efficient address accessor */ + u64 addr:40; + }; + u64 opaque_addr; + }; + /* The 'format' field indicates the interpretation of the remaining 29 + * bits of the 32-bit word. For packing reasons, it is duplicated in the + * other union elements. Note, union'd structs are difficult to use with + * static initialisation under gcc, in which case use the "opaque" form + * with one of the macros. */ + union { + /* For easier/faster copying of this part of the fd (eg. 
from a + * DQRR entry to an EQCR entry) copy 'opaque' */ + u32 opaque; + /* If 'format' is _contig or _sg, 20b length and 9b offset */ + struct { + enum qm_fd_format format:3; + u16 offset:9; + u32 length20:20; + }; + /* If 'format' is _contig_big or _sg_big, 29b length */ + struct { + enum qm_fd_format _format1:3; + u32 length29:29; + }; + /* If 'format' is _compound, 29b "congestion weight" */ + struct { + enum qm_fd_format _format2:3; + u32 cong_weight:29; + }; + }; + union { + u32 cmd; + u32 status; + }; +} __attribute__((aligned(8))); +#define QM_FD_DD_NULL 0x00 +#define QM_FD_PID_MASK 0x3f +static inline u64 qm_fd_addr_get64(const struct qm_fd *fd) +{ + return fd->addr; +} + +static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd) +{ + return (dma_addr_t)fd->addr; +} +/* Macro, so we compile better if 'v' isn't always 64-bit */ +#define qm_fd_addr_set64(fd, v) \ + do { \ + struct qm_fd *__fd931 = (fd); \ + __fd931->addr = v; \ + } while (0) + +/* For static initialisation of FDs (which is complicated by the use of unions + * in "struct qm_fd"), use the following macros. Note that; + * - 'dd', 'pid' and 'bpid' are ignored because there's no static initialisation + * use-case), + * - use capitalised QM_FD_*** formats for static initialisation. + */ +#define QM_FD_FMT_20(cmd, addr_hi, addr_lo, fmt, off, len) \ + { 0, 0, 0, 0, 0, addr_hi, addr_lo, \ + { (((fmt)&0x7) << 29) | (((off)&0x1ff) << 20) | ((len)&0xfffff) }, \ + { cmd } } +#define QM_FD_FMT_29(cmd, addr_hi, addr_lo, fmt, len) \ + { 0, 0, 0, 0, 0, addr_hi, addr_lo, \ + { (((fmt)&0x7) << 29) | ((len)&0x1fffffff) }, \ + { cmd } } + +/* See 2.2.1.3 Multi-Core Datapath Acceleration Architecture */ +struct qm_sg_entry { + union { + struct { + u8 __reserved1[3]; + u8 addr_hi; /* high 8-bits of 40-bit address */ + u32 addr_lo; /* low 32-bits of 40-bit address */ + }; + struct { + u64 __notaddress:24; + u64 addr:40; + }; + }; + u32 extension:1; /* Extension bit */ + u32 final:1; /* Final bit */ + u32 length:30; + u8 __reserved2; + u8 bpid; + u16 __reserved3:3; + u16 offset:13; +} __packed; +static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg) +{ + return sg->addr; +} +static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg) +{ + return (dma_addr_t)sg->addr; +} +/* Macro, so we compile better if 'v' isn't always 64-bit */ +#define qm_sg_entry_set64(sg, v) \ + do { \ + struct qm_sg_entry *__sg931 = (sg); \ + __sg931->addr = v; \ + } while (0) + +/* See 1.5.8.1: "Enqueue Command" */ +struct qm_eqcr_entry { + u8 __dont_write_directly__verb; + u8 dca; + u16 seqnum; + u32 orp; /* 24-bit */ + u32 fqid; /* 24-bit */ + u32 tag; + struct qm_fd fd; + u8 __reserved3[32]; +} __packed; +#define QM_EQCR_VERB_VBIT 0x80 +#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */ +#define QM_EQCR_VERB_CMD_ENQUEUE 0x01 +#define QM_EQCR_VERB_COLOUR_MASK 0x18 /* 4 possible values; */ +#define QM_EQCR_VERB_COLOUR_GREEN 0x00 +#define QM_EQCR_VERB_COLOUR_YELLOW 0x08 +#define QM_EQCR_VERB_COLOUR_RED 0x10 +#define QM_EQCR_VERB_COLOUR_OVERRIDE 0x18 +#define QM_EQCR_VERB_INTERRUPT 0x04 /* on command consumption */ +#define QM_EQCR_VERB_ORP 0x02 /* enable order restoration */ +#define QM_EQCR_DCA_ENABLE 0x80 +#define QM_EQCR_DCA_PARK 0x40 +#define QM_EQCR_DCA_IDXMASK 0x0f /* "DQRR::idx" goes here */ +#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */ +#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */ +#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */ +#define QM_EQCR_FQID_NULL 0 /* eg. 
for an ORP seqnum hole */ + +/* See 1.5.8.2: "Frame Dequeue Response" */ +struct qm_dqrr_entry { + u8 verb; + u8 stat; + u16 seqnum; /* 15-bit */ + u8 tok; + u8 __reserved2[3]; + u32 fqid; /* 24-bit */ + u32 contextB; + struct qm_fd fd; + u8 __reserved4[32]; +}; +#define QM_DQRR_VERB_VBIT 0x80 +#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */ +#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */ +#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */ +#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */ +#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */ +#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */ +#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */ +#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/ + +/* See 1.5.8.3: "ERN Message Response" */ +/* See 1.5.8.4: "FQ State Change Notification" */ +struct qm_mr_entry { + u8 verb; + union { + struct { + u8 dca; + u16 seqnum; + u8 rc; /* Rejection Code */ + u32 orp:24; + u32 fqid; /* 24-bit */ + u32 tag; + struct qm_fd fd; + } __packed ern; + struct { + u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */ + u8 __reserved1:4; + enum qm_dc_portal portal:2; + u16 __reserved2; + u8 rc; /* Rejection Code */ + u32 __reserved3:24; + u32 fqid; /* 24-bit */ + u32 tag; + struct qm_fd fd; + } __packed dcern; + struct { + u8 fqs; /* Frame Queue Status */ + u8 __reserved1[6]; + u32 fqid; /* 24-bit */ + u32 contextB; + u8 __reserved2[16]; + } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */ + }; + u8 __reserved2[32]; +} __packed; +#define QM_MR_VERB_VBIT 0x80 +/* The "ern" VERB bits match QM_EQCR_VERB_*** so aren't reproduced here. ERNs + * originating from direct-connect portals ("dcern") use 0x20 as a verb which + * would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished from + * the other MR types by noting if the 0x20 bit is unset. */ +#define QM_MR_VERB_TYPE_MASK 0x27 +#define QM_MR_VERB_DC_ERN 0x20 +#define QM_MR_VERB_FQRN 0x21 +#define QM_MR_VERB_FQRNI 0x22 +#define QM_MR_VERB_FQRL 0x23 +#define QM_MR_VERB_FQPN 0x24 +#define QM_MR_RC_MASK 0xf0 /* contains one of; */ +#define QM_MR_RC_CGR_TAILDROP 0x00 +#define QM_MR_RC_WRED 0x10 +#define QM_MR_RC_ERROR 0x20 +#define QM_MR_RC_ORPWINDOW_EARLY 0x30 +#define QM_MR_RC_ORPWINDOW_LATE 0x40 +#define QM_MR_RC_FQ_TAILDROP 0x50 +#define QM_MR_RC_ORPWINDOW_RETIRED 0x60 +#define QM_MR_RC_ORP_ZERO 0x70 +#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */ +#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */ +#define QM_MR_DCERN_COLOUR_GREEN 0x00 +#define QM_MR_DCERN_COLOUR_YELLOW 0x01 +#define QM_MR_DCERN_COLOUR_RED 0x02 +#define QM_MR_DCERN_COLOUR_OVERRIDE 0x03 + +/* An identical structure of FQD fields is present in the "Init FQ" command and + * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type. + * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the + * latter has two inlines to assist with converting to/from the mant+exp + * representation. 
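+ *
+ * For example, qm_fqd_taildrop_set(&fqd.td, 0x3000, 0) stores mant=0xc0 and
+ * exp=6, and qm_fqd_taildrop_get() then recovers 0xc0 << 6 == 0x3000.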
*/ +struct qm_fqd_stashing { + /* See QM_STASHING_EXCL_<...> */ + u8 exclusive; + u8 __reserved1:2; + /* Numbers of cachelines */ + u8 annotation_cl:2; + u8 data_cl:2; + u8 context_cl:2; +} __packed; +struct qm_fqd_taildrop { + u16 __reserved1:3; + u16 mant:8; + u16 exp:5; +} __packed; +struct qm_fqd_oac { + /* See QM_OAC_<...> */ + u8 oac:2; /* "Overhead Accounting Control" */ + u8 __reserved1:6; + /* Two's-complement value (-128 to +127) */ + signed char oal; /* "Overhead Accounting Length" */ +} __packed; +struct qm_fqd { + union { + u8 orpc; + struct { + u8 __reserved1:2; + u8 orprws:3; + u8 oa:1; + u8 olws:2; + } __packed; + }; + u8 cgid; + u16 fq_ctrl; /* See QM_FQCTRL_<...> */ + union { + u16 dest_wq; + struct { + u16 channel:13; /* qm_channel */ + u16 wq:3; + } __packed dest; + }; + u16 __reserved2:1; + u16 ics_cred:15; + /* For "Initialize Frame Queue" commands, the write-enable mask + * determines whether 'td' or 'oac_init' is observed. For query + * commands, this field is always 'td', and 'oac_query' (below) reflects + * the Overhead ACcounting values. */ + union { + struct qm_fqd_taildrop td; + struct qm_fqd_oac oac_init; + }; + u32 context_b; + union { + /* Treat it as 64-bit opaque */ + u64 opaque; + struct { + u32 hi; + u32 lo; + }; + /* Treat it as s/w portal stashing config */ + /* See 1.5.6.7.1: "FQD Context_A field used for [...] */ + struct { + struct qm_fqd_stashing stashing; + /* 48-bit address of FQ context to + * stash, must be cacheline-aligned */ + u16 context_hi; + u32 context_lo; + } __packed; + } context_a; + struct qm_fqd_oac oac_query; +} __packed; +/* 64-bit converters for context_hi/lo */ +static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd) +{ + return ((u64)fqd->context_a.context_hi << 32) | + (u64)fqd->context_a.context_lo; +} +static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd) +{ + return (dma_addr_t)qm_fqd_stashing_get64(fqd); +} +static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd) +{ + return ((u64)fqd->context_a.hi << 32) | + (u64)fqd->context_a.lo; +} +/* Macro, so we compile better when 'v' isn't necessarily 64-bit */ +#define qm_fqd_stashing_set64(fqd, v) \ + do { \ + struct qm_fqd *__fqd931 = (fqd); \ + __fqd931->context_a.context_hi = upper_32_bits(v); \ + __fqd931->context_a.context_lo = lower_32_bits(v); \ + } while (0) +#define qm_fqd_context_a_set64(fqd, v) \ + do { \ + struct qm_fqd *__fqd931 = (fqd); \ + __fqd931->context_a.hi = upper_32_bits(v); \ + __fqd931->context_a.lo = lower_32_bits(v); \ + } while (0) +/* convert a threshold value into mant+exp representation */ +static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val, + int roundup) +{ + u32 e = 0; + int oddbit = 0; + if (val > 0xe0000000) + return -ERANGE; + while (val > 0xff) { + oddbit = val & 1; + val >>= 1; + e++; + if (roundup && oddbit) + val++; + } + td->exp = e; + td->mant = val; + return 0; +} +/* and the other direction */ +static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td) +{ + return (u32)td->mant << td->exp; +} + +/* See 1.5.2.2: "Frame Queue Descriptor (FQD)" */ +/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */ +#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */ +#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */ +#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */ +#define QM_FQCTRL_ORP 0x0100 /* ORP Enable */ +#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */ +#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */ +#define 
QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */ +#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */ +#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */ +#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */ +#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */ + +/* See 1.5.6.7.1: "FQD Context_A field used for [...] */ +/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */ +#define QM_STASHING_EXCL_ANNOTATION 0x04 +#define QM_STASHING_EXCL_DATA 0x02 +#define QM_STASHING_EXCL_CTX 0x01 + +/* See 1.5.5.3: "Intra Class Scheduling" */ +/* FQD field 'OAC' (Overhead ACcounting) uses these constants */ +#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */ +#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */ + +/* See 1.5.8.4: "FQ State Change Notification" */ +/* This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields + * and associated commands/responses. The WRED parameters are calculated from + * these fields as follows; + * MaxTH = MA * (2 ^ Mn) + * Slope = SA / (2 ^ Sn) + * MaxP = 4 * (Pn + 1) + */ +struct qm_cgr_wr_parm { + union { + u32 word; + struct { + u32 MA:8; + u32 Mn:5; + u32 SA:7; /* must be between 64-127 */ + u32 Sn:6; + u32 Pn:6; + } __packed; + }; +} __packed; +/* This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding + * management commands, this is padded to a 16-bit structure field, so that's + * how we represent it here. The congestion state threshold is calculated from + * these fields as follows; + * CS threshold = TA * (2 ^ Tn) + */ +struct qm_cgr_cs_thres { + u16 __reserved:3; + u16 TA:8; + u16 Tn:5; +} __packed; +/* This identical structure of CGR fields is present in the "Init/Modify CGR" + * commands and the "Query CGR" result. It's suctioned out here into its own + * struct. 
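+ *
+ * The cs_thres field below uses qm_cgr_cs_thres_set64() for the TA/Tn encoding
+ * described above; for example, a threshold of 0x10000 is stored as TA=0x80,
+ * Tn=9 (0x80 << 9 == 0x10000).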
*/ +struct __qm_mc_cgr { + struct qm_cgr_wr_parm wr_parm_g; + struct qm_cgr_wr_parm wr_parm_y; + struct qm_cgr_wr_parm wr_parm_r; + u8 wr_en_g; /* boolean, use QM_CGR_EN */ + u8 wr_en_y; /* boolean, use QM_CGR_EN */ + u8 wr_en_r; /* boolean, use QM_CGR_EN */ + u8 cscn_en; /* boolean, use QM_CGR_EN */ + union { + struct { + u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */ + u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */ + }; + u32 cscn_targ; /* use QM_CGR_TARG_* */ + }; + u8 cstd_en; /* boolean, use QM_CGR_EN */ + u8 cs; /* boolean, only used in query response */ + struct qm_cgr_cs_thres cs_thres; /* use qm_cgr_cs_thres_set64() */ + u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */ +} __packed; +#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */ +#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/ +#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */ +#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */ +#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */ +#define QM_CGR_TARG_FMAN1 0x00100000 /* : fman1 */ +/* Convert CGR thresholds to/from "cs_thres" format */ +static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th) +{ + return (u64)th->TA << th->Tn; +} +static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val, + int roundup) +{ + u32 e = 0; + int oddbit = 0; + while (val > 0xff) { + oddbit = val & 1; + val >>= 1; + e++; + if (roundup && oddbit) + val++; + } + th->Tn = e; + th->TA = val; + return 0; +} + +/* See 1.5.8.5.1: "Initialize FQ" */ +/* See 1.5.8.5.2: "Query FQ" */ +/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */ +/* See 1.5.8.5.4: "Alter FQ State Commands " */ +/* See 1.5.8.6.1: "Initialize/Modify CGR" */ +/* See 1.5.8.6.2: "CGR Test Write" */ +/* See 1.5.8.6.3: "Query CGR" */ +/* See 1.5.8.6.4: "Query Congestion Group State" */ +struct qm_mcc_initfq { + u8 __reserved1; + u16 we_mask; /* Write Enable Mask */ + u32 fqid; /* 24-bit */ + u16 count; /* Initialises 'count+1' FQDs */ + struct qm_fqd fqd; /* the FQD fields go here */ + u8 __reserved3[30]; +} __packed; +struct qm_mcc_queryfq { + u8 __reserved1[3]; + u32 fqid; /* 24-bit */ + u8 __reserved2[56]; +} __packed; +struct qm_mcc_queryfq_np { + u8 __reserved1[3]; + u32 fqid; /* 24-bit */ + u8 __reserved2[56]; +} __packed; +struct qm_mcc_alterfq { + u8 __reserved1[3]; + u32 fqid; /* 24-bit */ + u8 __reserved2; + u8 count; /* number of consecutive FQID */ + u8 __reserved3[10]; + u32 context_b; /* frame queue context b */ + u8 __reserved4[40]; +} __packed; +struct qm_mcc_initcgr { + u8 __reserved1; + u16 we_mask; /* Write Enable Mask */ + struct __qm_mc_cgr cgr; /* CGR fields */ + u8 __reserved2[2]; + u8 cgid; + u8 __reserved4[32]; +} __packed; +struct qm_mcc_cgrtestwrite { + u8 __reserved1[2]; + u8 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */ + u32 i_bcnt_lo; /* low 32-bits of 40-bit */ + u8 __reserved2[23]; + u8 cgid; + u8 __reserved3[32]; +} __packed; +struct qm_mcc_querycgr { + u8 __reserved1[30]; + u8 cgid; + u8 __reserved2[32]; +} __packed; +struct qm_mcc_querycongestion { + u8 __reserved[63]; +} __packed; +struct qm_mcc_querywq { + u8 __reserved; + /* select channel if verb != QUERYWQ_DEDICATED */ + union { + u16 channel_wq; /* ignores wq (3 lsbits) */ + struct { + u16 id:13; /* qm_channel */ + u16 __reserved1:3; + } __packed channel; + }; + u8 __reserved2[60]; +} __packed; + +struct qm_mcc_ceetm_lfqmt_config { + u8 __reserved1[4]; + u32 lfqid:24; + u8 __reserved2[2]; + u16 
cqid; + u8 __reserved3[2]; + u16 dctidx; + u8 __reserved4[48]; +} __packed; + +struct qm_mcc_ceetm_lfqmt_query { + u8 __reserved1[4]; + u32 lfqid:24; + u8 __reserved2[56]; +} __packed; + +struct qm_mcc_ceetm_cq_config { + u8 __reserved1; + u16 cqid; + u8 dcpid; + u8 __reserved2; + u16 ccgid; + u8 __reserved3[56]; +} __packed; + +struct qm_mcc_ceetm_cq_query { + u8 __reserved1; + u16 cqid; + u8 dcpid; + u8 __reserved2[59]; +} __packed; + +struct qm_mcc_ceetm_dct_config { + u8 __reserved1; + u16 dctidx; + u8 dcpid; + u8 __reserved2[15]; + u32 context_b; + u64 context_a; + u8 __reserved3[32]; +} __packed; + +struct qm_mcc_ceetm_dct_query { + u8 __reserved1; + u16 dctidx; + u8 dcpid; + u8 __reserved2[59]; +} __packed; + +struct qm_mcc_ceetm_class_scheduler_config { + u8 __reserved1; + u16 cqcid; + u8 dcpid; + u8 __reserved2[6]; + u8 gpc; + u16 crem; + u16 erem; + u8 w[8]; + u8 __reserved3[40]; +} __packed; + +struct qm_mcc_ceetm_class_scheduler_query { + u8 __reserved1; + u16 cqcid; + u8 dcpid; + u8 __reserved2[59]; +} __packed; + +#define CEETM_COMMAND_CHANNEL_MAPPING (0 << 12) +#define CEETM_COMMAND_SP_MAPPING (1 << 12) +#define CEETM_COMMAND_CHANNEL_SHAPER (2 << 12) +#define CEETM_COMMAND_LNI_SHAPER (3 << 12) +#define CEETM_COMMAND_TCFC (4 << 12) + +#define CEETM_CCGRID_MASK 0x01FF +#define CEETM_CCGR_CM_CONFIGURE (0 << 14) +#define CEETM_CCGR_DN_CONFIGURE (1 << 14) +#define CEETM_CCGR_TEST_WRITE (2 << 14) +#define CEETM_CCGR_CM_QUERY (0 << 14) +#define CEETM_CCGR_DN_QUERY (1 << 14) +#define CEETM_CCGR_DN_QUERY_FLUSH (2 << 14) +#define CEETM_QUERY_CONGESTION_STATE (3 << 14) + +struct qm_mcc_ceetm_mapping_shaper_tcfc_config { + u8 __reserved1; + u16 cid; + u8 dcpid; + union { + struct { + u8 map; + u8 __reserved2[58]; + } __packed channel_mapping; + struct { + u8 map_reserved:5; + u8 map_lni_id:3; + u8 __reserved2[58]; + } __packed sp_mapping; + struct { + u8 cpl; + u32 crtcr:24; + u32 ertcr:24; + u16 crtbl; + u16 ertbl; + u8 __reserved2[48]; + } __packed shaper_config; + struct { + u8 __reserved2[11]; + u64 lnitcfcc; + u8 __reserved3[40]; + } __packed tcfc_config; + }; +} __packed; + +struct qm_mcc_ceetm_mapping_shaper_tcfc_query { + u8 __reserved1; + u16 cid; + u8 dcpid; + u8 __reserved2[59]; +} __packed; + +struct qm_mcc_ceetm_ccgr_config { + u8 __reserved1; + u16 ccgrid; + u8 dcpid; + u8 __reserved2; + u16 we_mask; + union { + struct { + u8 ctl; + u8 cdv; + u16 cscn_tupd; + u8 oal; + u8 __reserved3; + struct qm_cgr_cs_thres cs_thres; + struct qm_cgr_cs_thres cs_thres_x; + struct qm_cgr_cs_thres td_thres; + struct qm_cgr_wr_parm wr_parm_g; + struct qm_cgr_wr_parm wr_parm_y; + struct qm_cgr_wr_parm wr_parm_r; + } __packed cm_config; + struct { + u8 dnc; + u8 dn0; + u8 dn1; + u64 dnba:40; + u8 __reserved3[2]; + u16 dnth_0; + u8 __reserved4[2]; + u16 dnth_1; + u8 __reserved5[8]; + } __packed dn_config; + struct { + u8 __reserved3[3]; + u64 i_cnt:40; + u8 __reserved4[16]; + } __packed test_write; + }; + u8 __reserved5[32]; +} __packed; + +struct qm_mcc_ceetm_ccgr_query { + u8 __reserved1; + u16 ccgrid; + u8 dcpid; + u8 __reserved2[59]; +} __packed; + +struct qm_mcc_ceetm_cq_peek_pop_xsfdrread { + u8 __reserved1; + u16 cqid; + u8 dcpid; + u8 ct; + u16 xsfdr; + u8 __reserved2[56]; +} __packed; + +#define CEETM_QUERY_DEQUEUE_STATISTICS 0x00 +#define CEETM_QUERY_DEQUEUE_CLEAR_STATISTICS 0x01 +#define CEETM_WRITE_DEQUEUE_STATISTICS 0x02 +#define CEETM_QUERY_REJECT_STATISTICS 0x03 +#define CEETM_QUERY_REJECT_CLEAR_STATISTICS 0x04 +#define CEETM_WRITE_REJECT_STATISTICS 0x05 +struct 
qm_mcc_ceetm_statistics_query_write { + u8 __reserved1; + u16 cid; + u8 dcpid; + u8 ct; + u8 __reserved2[13]; + u64 frm_cnt:40; + u16 __reserved3[2]; + u64 byte_cnt:40; + u8 __reserved[32]; +} __packed; + +struct qm_mc_command { + u8 __dont_write_directly__verb; + union { + struct qm_mcc_initfq initfq; + struct qm_mcc_queryfq queryfq; + struct qm_mcc_queryfq_np queryfq_np; + struct qm_mcc_alterfq alterfq; + struct qm_mcc_initcgr initcgr; + struct qm_mcc_cgrtestwrite cgrtestwrite; + struct qm_mcc_querycgr querycgr; + struct qm_mcc_querycongestion querycongestion; + struct qm_mcc_querywq querywq; + struct qm_mcc_ceetm_lfqmt_config lfqmt_config; + struct qm_mcc_ceetm_lfqmt_query lfqmt_query; + struct qm_mcc_ceetm_cq_config cq_config; + struct qm_mcc_ceetm_cq_query cq_query; + struct qm_mcc_ceetm_dct_config dct_config; + struct qm_mcc_ceetm_dct_query dct_query; + struct qm_mcc_ceetm_class_scheduler_config csch_config; + struct qm_mcc_ceetm_class_scheduler_query csch_query; + struct qm_mcc_ceetm_mapping_shaper_tcfc_config mst_config; + struct qm_mcc_ceetm_mapping_shaper_tcfc_query mst_query; + struct qm_mcc_ceetm_ccgr_config ccgr_config; + struct qm_mcc_ceetm_ccgr_query ccgr_query; + struct qm_mcc_ceetm_cq_peek_pop_xsfdrread cq_ppxr; + struct qm_mcc_ceetm_statistics_query_write stats_query_write; + }; +} __packed; +#define QM_MCC_VERB_VBIT 0x80 +#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */ +#define QM_MCC_VERB_INITFQ_PARKED 0x40 +#define QM_MCC_VERB_INITFQ_SCHED 0x41 +#define QM_MCC_VERB_QUERYFQ 0x44 +#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */ +#define QM_MCC_VERB_QUERYWQ 0x46 +#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47 +#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */ +#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */ +#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */ +#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */ +#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */ +#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */ +#define QM_MCC_VERB_INITCGR 0x50 +#define QM_MCC_VERB_MODIFYCGR 0x51 +#define QM_MCC_VERB_CGRTESTWRITE 0x52 +#define QM_MCC_VERB_QUERYCGR 0x58 +#define QM_MCC_VERB_QUERYCONGESTION 0x59 +/* INITFQ-specific flags */ +#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */ +#define QM_INITFQ_WE_OAC 0x0100 +#define QM_INITFQ_WE_ORPC 0x0080 +#define QM_INITFQ_WE_CGID 0x0040 +#define QM_INITFQ_WE_FQCTRL 0x0020 +#define QM_INITFQ_WE_DESTWQ 0x0010 +#define QM_INITFQ_WE_ICSCRED 0x0008 +#define QM_INITFQ_WE_TDTHRESH 0x0004 +#define QM_INITFQ_WE_CONTEXTB 0x0002 +#define QM_INITFQ_WE_CONTEXTA 0x0001 +/* INITCGR/MODIFYCGR-specific flags */ +#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */ +#define QM_CGR_WE_WR_PARM_G 0x0400 +#define QM_CGR_WE_WR_PARM_Y 0x0200 +#define QM_CGR_WE_WR_PARM_R 0x0100 +#define QM_CGR_WE_WR_EN_G 0x0080 +#define QM_CGR_WE_WR_EN_Y 0x0040 +#define QM_CGR_WE_WR_EN_R 0x0020 +#define QM_CGR_WE_CSCN_EN 0x0010 +#define QM_CGR_WE_CSCN_TARG 0x0008 +#define QM_CGR_WE_CSTD_EN 0x0004 +#define QM_CGR_WE_CS_THRES 0x0002 +#define QM_CGR_WE_MODE 0x0001 + +/* See 1.5.9.7 CEETM Management Commands */ +#define QM_CEETM_VERB_LFQMT_CONFIG 0x70 +#define QM_CEETM_VERB_LFQMT_QUERY 0x71 +#define QM_CEETM_VERB_CQ_CONFIG 0x72 +#define QM_CEETM_VERB_CQ_QUERY 0x73 +#define QM_CEETM_VERB_DCT_CONFIG 0x74 +#define QM_CEETM_VERB_DCT_QUERY 0x75 +#define QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG 0x76 +#define QM_CEETM_VERB_CLASS_SCHEDULER_QUERY 0x77 +#define QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG 0x78 +#define 
QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY 0x79 +#define QM_CEETM_VERB_CCGR_CONFIG 0x7A +#define QM_CEETM_VERB_CCGR_QUERY 0x7B +#define QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD 0x7C +#define QM_CEETM_VERB_STATISTICS_QUERY_WRITE 0x7D + +/* See 1.5.8.5.1: "Initialize FQ" */ +/* See 1.5.8.5.2: "Query FQ" */ +/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */ +/* See 1.5.8.5.4: "Alter FQ State Commands " */ +/* See 1.5.8.6.1: "Initialize/Modify CGR" */ +/* See 1.5.8.6.2: "CGR Test Write" */ +/* See 1.5.8.6.3: "Query CGR" */ +/* See 1.5.8.6.4: "Query Congestion Group State" */ +struct qm_mcr_initfq { + u8 __reserved1[62]; +} __packed; +struct qm_mcr_queryfq { + u8 __reserved1[8]; + struct qm_fqd fqd; /* the FQD fields are here */ + u8 __reserved2[30]; +} __packed; +struct qm_mcr_queryfq_np { + u8 __reserved1; + u8 state; /* QM_MCR_NP_STATE_*** */ + u8 __reserved2; + u32 fqd_link:24; + u16 __reserved3:2; + u16 odp_seq:14; + u16 __reserved4:2; + u16 orp_nesn:14; + u16 __reserved5:1; + u16 orp_ea_hseq:15; + u16 __reserved6:1; + u16 orp_ea_tseq:15; + u8 __reserved7; + u32 orp_ea_hptr:24; + u8 __reserved8; + u32 orp_ea_tptr:24; + u8 __reserved9; + u32 pfdr_hptr:24; + u8 __reserved10; + u32 pfdr_tptr:24; + u8 __reserved11[5]; + u8 __reserved12:7; + u8 is:1; + u16 ics_surp; + u32 byte_cnt; + u8 __reserved13; + u32 frm_cnt:24; + u32 __reserved14; + u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */ + u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */ + u16 __reserved15; + u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */ + u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */ + u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */ +} __packed; +struct qm_mcr_alterfq { + u8 fqs; /* Frame Queue Status */ + u8 __reserved1[61]; +} __packed; +struct qm_mcr_initcgr { + u8 __reserved1[62]; +} __packed; +struct qm_mcr_cgrtestwrite { + u16 __reserved1; + struct __qm_mc_cgr cgr; /* CGR fields */ + u8 __reserved2[3]; + u32 __reserved3:24; + u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */ + u32 i_bcnt_lo; /* low 32-bits of 40-bit */ + u32 __reserved4:24; + u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */ + u32 a_bcnt_lo; /* low 32-bits of 40-bit */ + u16 lgt; /* Last Group Tick */ + u16 wr_prob_g; + u16 wr_prob_y; + u16 wr_prob_r; + u8 __reserved5[8]; +} __packed; +struct qm_mcr_querycgr { + u16 __reserved1; + struct __qm_mc_cgr cgr; /* CGR fields */ + u8 __reserved2[3]; + u32 __reserved3:24; + u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */ + u32 i_bcnt_lo; /* low 32-bits of 40-bit */ + u32 __reserved4:24; + u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */ + u32 a_bcnt_lo; /* low 32-bits of 40-bit */ + union { + u32 cscn_targ_swp[4]; + u8 __reserved5[16]; + }; +} __packed; +static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q) +{ + return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo; +} +static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q) +{ + return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo; +} +static inline u64 qm_mcr_cgrtestwrite_i_get64( + const struct qm_mcr_cgrtestwrite *q) +{ + return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo; +} +static inline u64 qm_mcr_cgrtestwrite_a_get64( + const struct qm_mcr_cgrtestwrite *q) +{ + return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo; +} +/* Macro, so we compile better if 'v' isn't always 64-bit */ +#define qm_mcr_querycgr_i_set64(q, v) \ + do { \ + struct qm_mcr_querycgr *__q931 = (q); \ + __q931->i_bcnt_hi = upper_32_bits(v); \ + __q931->i_bcnt_lo = lower_32_bits(v); \ + } while (0) +#define qm_mcr_querycgr_a_set64(q, v) \ + do { \ + struct qm_mcr_querycgr 
*__q931 = (q); \ + __q931->a_bcnt_hi = upper_32_bits(v); \ + __q931->a_bcnt_lo = lower_32_bits(v); \ + } while (0) +struct __qm_mcr_querycongestion { + u32 __state[8]; +}; +struct qm_mcr_querycongestion { + u8 __reserved[30]; + /* Access this struct using QM_MCR_QUERYCONGESTION() */ + struct __qm_mcr_querycongestion state; +} __packed; +struct qm_mcr_querywq { + union { + u16 channel_wq; /* ignores wq (3 lsbits) */ + struct { + u16 id:13; /* qm_channel */ + u16 __reserved:3; + } __packed channel; + }; + u8 __reserved[28]; + u32 wq_len[8]; +} __packed; + +/* QMAN CEETM Management Command Response */ +struct qm_mcr_ceetm_lfqmt_config { + u8 __reserved1[62]; +} __packed; +struct qm_mcr_ceetm_lfqmt_query { + u8 __reserved1[8]; + u16 cqid; + u8 __reserved2[2]; + u16 dctidx; + u8 __reserved3[2]; + u16 ccgid; + u8 __reserved4[44]; +} __packed; + +struct qm_mcr_ceetm_cq_config { + u8 __reserved1[62]; +} __packed; + +struct qm_mcr_ceetm_cq_query { + u8 __reserved1[4]; + u16 ccgid; + u16 state; + u32 pfdr_hptr:24; + u32 pfdr_tptr:24; + u16 od1_xsfdr; + u16 od2_xsfdr; + u16 od3_xsfdr; + u16 od4_xsfdr; + u16 od5_xsfdr; + u16 od6_xsfdr; + u16 ra1_xsfdr; + u16 ra2_xsfdr; + u8 __reserved2; + u32 frm_cnt:24; + u8 __reserved333[28]; +} __packed; + +struct qm_mcr_ceetm_dct_config { + u8 __reserved1[62]; +} __packed; + +struct qm_mcr_ceetm_dct_query { + u8 __reserved1[18]; + u32 context_b; + u64 context_a; + u8 __reserved2[32]; +} __packed; + +struct qm_mcr_ceetm_class_scheduler_config { + u8 __reserved1[62]; +} __packed; + +struct qm_mcr_ceetm_class_scheduler_query { + u8 __reserved1[9]; + u8 gpc; + u16 crem; + u16 erem; + u8 w[8]; + u8 __reserved2[5]; + u32 wbfslist:24; + u32 d8; + u32 d9; + u32 d10; + u32 d11; + u32 d12; + u32 d13; + u32 d14; + u32 d15; +} __packed; + +struct qm_mcr_ceetm_mapping_shaper_tcfc_config { + u16 cid; + u8 __reserved2[60]; +} __packed; + +struct qm_mcr_ceetm_mapping_shaper_tcfc_query { + u16 cid; + u8 __reserved1; + union { + struct { + u8 map; + u8 __reserved2[58]; + } __packed channel_mapping_query; + struct { + u8 map_reserved:5; + u8 map_lni_id:3; + u8 __reserved2[58]; + } __packed sp_mapping_query; + struct { + u8 cpl; + u32 crtcr:24; + u32 ertcr:24; + u16 crtbl; + u16 ertbl; + u8 __reserved2[16]; + u32 crat; + u32 erat; + u8 __reserved3[24]; + } __packed shaper_query; + struct { + u8 __reserved1[11]; + u64 lnitcfcc; + u8 __reserved3[40]; + } __packed tcfc_query; + }; +} __packed; + +struct qm_mcr_ceetm_ccgr_config { + u8 __reserved1[46]; + union { + u8 __reserved2[8]; + struct { + u16 timestamp; + u16 wr_porb_g; + u16 wr_prob_y; + u16 wr_prob_r; + } __packed test_write; + }; + u8 __reserved3[8]; +} __packed; + +struct qm_mcr_ceetm_ccgr_query { + u8 __reserved1[6]; + union { + struct { + u8 ctl; + u8 cdv; + u8 __reserved2[2]; + u8 oal; + u8 __reserved3; + struct qm_cgr_cs_thres cs_thres; + struct qm_cgr_cs_thres cs_thres_x; + struct qm_cgr_cs_thres td_thres; + struct qm_cgr_wr_parm wr_parm_g; + struct qm_cgr_wr_parm wr_parm_y; + struct qm_cgr_wr_parm wr_parm_r; + u8 cscn_targ_dcp; + u16 dcp_lsn; + u64 i_cnt:40; + u8 __reserved4[3]; + u64 a_cnt:40; + u64 cscn_targ_swp[2]; + } __packed cm_query; + struct { + u8 dnc; + u8 dn0; + u8 dn1; + u64 dnba:40; + u8 __reserved2[2]; + u16 dnth_0; + u8 __reserved3[2]; + u16 dnth_1; + u8 __reserved4[10]; + u16 dnacc_0; + u8 __reserved5[2]; + u16 dnacc_1; + u8 __reserved6[24]; + } __packed dn_query; + struct { + u8 __reserved2[24]; + u16 ccg_state[16]; + } __packed congestion_state; + + }; +} __packed; + +struct 
qm_mcr_ceetm_cq_peek_pop_xsfdrread { + u8 stat; + u8 __reserved1[11]; + u16 dctidx; + struct qm_fd fd; + u8 __reserved2[32]; +} __packed; + +struct qm_mcr_ceetm_statistics_query { + u8 __reserved1[15]; + u64 frm_cnt:40; + u8 __reserved2[2]; + u64 byte_cnt:40; + u8 __reserved3[32]; +} __packed; + +struct qm_mc_result { + u8 verb; + u8 result; + union { + struct qm_mcr_initfq initfq; + struct qm_mcr_queryfq queryfq; + struct qm_mcr_queryfq_np queryfq_np; + struct qm_mcr_alterfq alterfq; + struct qm_mcr_initcgr initcgr; + struct qm_mcr_cgrtestwrite cgrtestwrite; + struct qm_mcr_querycgr querycgr; + struct qm_mcr_querycongestion querycongestion; + struct qm_mcr_querywq querywq; + struct qm_mcr_ceetm_lfqmt_config lfqmt_config; + struct qm_mcr_ceetm_lfqmt_query lfqmt_query; + struct qm_mcr_ceetm_cq_config cq_config; + struct qm_mcr_ceetm_cq_query cq_query; + struct qm_mcr_ceetm_dct_config dct_config; + struct qm_mcr_ceetm_dct_query dct_query; + struct qm_mcr_ceetm_class_scheduler_config csch_config; + struct qm_mcr_ceetm_class_scheduler_query csch_query; + struct qm_mcr_ceetm_mapping_shaper_tcfc_config mst_config; + struct qm_mcr_ceetm_mapping_shaper_tcfc_query mst_query; + struct qm_mcr_ceetm_ccgr_config ccgr_config; + struct qm_mcr_ceetm_ccgr_query ccgr_query; + struct qm_mcr_ceetm_cq_peek_pop_xsfdrread cq_ppxr; + struct qm_mcr_ceetm_statistics_query stats_query; + }; +} __packed; + +#define QM_MCR_VERB_RRID 0x80 +#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK +#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED +#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED +#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ +#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP +#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ +#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED +#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED +#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE +#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE +#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS +#define QM_MCR_RESULT_NULL 0x00 +#define QM_MCR_RESULT_OK 0xf0 +#define QM_MCR_RESULT_ERR_FQID 0xf1 +#define QM_MCR_RESULT_ERR_FQSTATE 0xf2 +#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */ +#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4 +#define QM_MCR_RESULT_PENDING 0xf8 +#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff +#define QM_MCR_NP_STATE_FE 0x10 +#define QM_MCR_NP_STATE_R 0x08 +#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */ +#define QM_MCR_NP_STATE_OOS 0x00 +#define QM_MCR_NP_STATE_RETIRED 0x01 +#define QM_MCR_NP_STATE_TEN_SCHED 0x02 +#define QM_MCR_NP_STATE_TRU_SCHED 0x03 +#define QM_MCR_NP_STATE_PARKED 0x04 +#define QM_MCR_NP_STATE_ACTIVE 0x05 +#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */ +#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */ +#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */ +#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */ +#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */ +#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */ +#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */ +/* This extracts the state for congestion group 'n' from a query response. + * Eg. 
+ * u8 cgr = [...]; + * struct qm_mc_result *res = [...]; + * printf("congestion group %d congestion state: %d\n", cgr, + * QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr)); + */ +#define __CGR_WORD(num) (num >> 5) +#define __CGR_SHIFT(num) (num & 0x1f) +#define __CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3) +static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p, + u8 cgr) +{ + return p->__state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr)); +} + + +/*********************/ +/* Utility interface */ +/*********************/ + +/* Represents an allocator over a range of FQIDs. NB, accesses are not locked, + * spinlock them yourself if needed. */ +struct qman_fqid_pool; + +/* Create/destroy a FQID pool, num must be a multiple of 32. NB, _destroy() + * always succeeds, but returns non-zero if there were "leaked" FQID + * allocations. */ +struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num); +int qman_fqid_pool_destroy(struct qman_fqid_pool *pool); +/* Alloc/free a FQID from the range. _alloc() returns zero for success. */ +int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid); +void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid); +u32 qman_fqid_pool_used(struct qman_fqid_pool *pool); + +/*******************************************************************/ +/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */ +/*******************************************************************/ + + /* Portal and Frame Queues */ + /* ----------------------- */ +/* Represents a managed portal */ +struct qman_portal; + +/* This object type represents Qman frame queue descriptors (FQD), it is + * cacheline-aligned, and initialised by qman_create_fq(). The structure is + * defined further down. */ +struct qman_fq; + +/* This object type represents a Qman congestion group, it is defined further + * down. */ +struct qman_cgr; + +struct qman_portal_config { + /* If the caller enables DQRR stashing (and thus wishes to operate the + * portal from only one cpu), this is the logical CPU that the portal + * will stash to. Whether stashing is enabled or not, this setting is + * also used for any "core-affine" portals, ie. default portals + * associated to the corresponding cpu. -1 implies that there is no core + * affinity configured. */ + int cpu; + /* portal interrupt line */ + int irq; + /* the unique index of this portal */ + u32 index; + /* Is this portal shared? (If so, it has coarser locking and demuxes + * processing on behalf of other CPUs.) */ + int is_shared; + /* The portal's dedicated channel id, use this value for initialising + * frame queues to target this portal when scheduled. */ + u16 channel; + /* A mask of which pool channels this portal has dequeue access to + * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask) */ + u32 pools; +}; + +/* This enum, and the callback type that returns it, are used when handling + * dequeued frames via DQRR. Note that for "null" callbacks registered with the + * portal object (for handling dequeues that do not demux because contextB is + * NULL), the return value *MUST* be qman_cb_dqrr_consume. */ +enum qman_cb_dqrr_result { + /* DQRR entry can be consumed */ + qman_cb_dqrr_consume, + /* Like _consume, but requests parking - FQ must be held-active */ + qman_cb_dqrr_park, + /* Does not consume, for DCA mode only. This allows out-of-order + * consumes by explicit calls to qman_dca() and/or the use of implicit + * DCA via EQCR entries. 
*/ + qman_cb_dqrr_defer, + /* Stop processing without consuming this ring entry. Exits the current + * qman_poll_dqrr() or interrupt-handling, as appropriate. If within an + * interrupt handler, the callback would typically call + * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value, + * otherwise the interrupt will reassert immediately. */ + qman_cb_dqrr_stop, + /* Like qman_cb_dqrr_stop, but consumes the current entry. */ + qman_cb_dqrr_consume_stop +}; +typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm, + struct qman_fq *fq, + const struct qm_dqrr_entry *dqrr); + +/* This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They + * are always consumed after the callback returns. */ +typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq, + const struct qm_mr_entry *msg); + +/* This callback type is used when handling DCP ERNs */ +typedef void (*qman_cb_dc_ern)(struct qman_portal *qm, + const struct qm_mr_entry *msg); + +/* s/w-visible states. Ie. tentatively scheduled + truly scheduled + active + + * held-active + held-suspended are just "sched". Things like "retired" will not + * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until + * then, to indicate it's completing and to gate attempts to retry the retire + * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's + * technically impossible in the case of enqueue DCAs (which refer to DQRR ring + * index rather than the FQ that ring entry corresponds to), so repeated park + * commands are allowed (if you're silly enough to try) but won't change FQ + * state, and the resulting park notifications move FQs from "sched" to + * "parked". */ +enum qman_fq_state { + qman_fq_state_oos, + qman_fq_state_parked, + qman_fq_state_sched, + qman_fq_state_retired +}; + +/* Frame queue objects (struct qman_fq) are stored within memory passed to + * qman_create_fq(), as this allows stashing of caller-provided demux callback + * pointers at no extra cost to stashing of (driver-internal) FQ state. If the + * caller wishes to add per-FQ state and have it benefit from dequeue-stashing, + * they should; + * + * (a) extend the qman_fq structure with their state; eg. + * + * // myfq is allocated and driver_fq callbacks filled in; + * struct my_fq { + * struct qman_fq base; + * int an_extra_field; + * [ ... add other fields to be associated with each FQ ...] + * } *myfq = some_my_fq_allocator(); + * struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base); + * + * // in a dequeue callback, access extra fields from 'fq' via a cast; + * struct my_fq *myfq = (struct my_fq *)fq; + * do_something_with(myfq->an_extra_field); + * [...] + * + * (b) when and if configuring the FQ for context stashing, specify how ever + * many cachelines are required to stash 'struct my_fq', to accelerate not + * only the Qman driver but the callback as well. + */ + +struct qman_fq_cb { + qman_cb_dqrr dqrr; /* for dequeued frames */ + qman_cb_mr ern; /* for s/w ERNs */ + qman_cb_mr fqs; /* frame-queue state changes*/ +}; + +struct qman_fq { + /* Caller of qman_create_fq() provides these demux callbacks */ + struct qman_fq_cb cb; + /* These are internal to the driver, don't touch. In particular, they + * may change, be removed, or extended (so you shouldn't rely on + * sizeof(qman_fq) being a constant). 
*/ + spinlock_t fqlock; + u32 fqid; + volatile unsigned long flags; + enum qman_fq_state state; + int cgr_groupid; + struct rb_node node; +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + u32 key; +#endif +}; + +/* This callback type is used when handling congestion group entry/exit. + * 'congested' is non-zero on congestion-entry, and zero on congestion-exit. */ +typedef void (*qman_cb_cgr)(struct qman_portal *qm, + struct qman_cgr *cgr, int congested); + +struct qman_cgr { + /* Set these prior to qman_create_cgr() */ + u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/ + qman_cb_cgr cb; + /* These are private to the driver */ + u16 chan; /* portal channel this object is created on */ + struct list_head node; +}; + +/* Flags to qman_create_fq() */ +#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */ +#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */ +#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */ +#define QMAN_FQ_FLAG_LOCKED 0x00000008 /* multi-core locking */ +#define QMAN_FQ_FLAG_AS_IS 0x00000010 /* query h/w state */ +#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */ + +/* Flags to qman_destroy_fq() */ +#define QMAN_FQ_DESTROY_PARKED 0x00000001 /* FQ can be parked or OOS */ + +/* Flags from qman_fq_state() */ +#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */ +#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */ +#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */ +#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */ +#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */ +#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */ + +/* Flags to qman_init_fq() */ +#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */ +#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */ + +/* Flags to qman_volatile_dequeue() */ +#ifdef CONFIG_FSL_DPA_CAN_WAIT +#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */ +#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */ +#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */ +#endif + +/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware, + * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so + * any change here should be audited in PME.) */ +#ifdef CONFIG_FSL_DPA_CAN_WAIT +#define QMAN_ENQUEUE_FLAG_WAIT 0x00010000 /* wait if EQCR is full */ +#define QMAN_ENQUEUE_FLAG_WAIT_INT 0x00020000 /* if wait, interruptible? */ +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC +#define QMAN_ENQUEUE_FLAG_WAIT_SYNC 0x00000004 /* if wait, until consumed? */ +#endif +#endif +#define QMAN_ENQUEUE_FLAG_WATCH_CGR 0x00080000 /* watch congestion state */ +#define QMAN_ENQUEUE_FLAG_DCA 0x00008000 /* perform enqueue-DCA */ +#define QMAN_ENQUEUE_FLAG_DCA_PARK 0x00004000 /* If DCA, requests park */ +#define QMAN_ENQUEUE_FLAG_DCA_PTR(p) /* If DCA, p is DQRR entry */ \ + (((u32)(p) << 2) & 0x00000f00) +#define QMAN_ENQUEUE_FLAG_C_GREEN 0x00000000 /* choose one C_*** flag */ +#define QMAN_ENQUEUE_FLAG_C_YELLOW 0x00000008 +#define QMAN_ENQUEUE_FLAG_C_RED 0x00000010 +#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018 +/* For the ORP-specific qman_enqueue_orp() variant; + * - this flag indicates "Not Last In Sequence", ie. all but the final fragment + * of a frame. 
*/ +#define QMAN_ENQUEUE_FLAG_NLIS 0x01000000 +/* - this flag performs no enqueue but fills in an ORP sequence number that + * would otherwise block it (eg. if a frame has been dropped). */ +#define QMAN_ENQUEUE_FLAG_HOLE 0x02000000 +/* - this flag performs no enqueue but advances NESN to the given sequence + * number. */ +#define QMAN_ENQUEUE_FLAG_NESN 0x04000000 + +/* Flags to qman_modify_cgr() */ +#define QMAN_CGR_FLAG_USE_INIT 0x00000001 +#define QMAN_CGR_MODE_FRAME 0x00000001 + + /* Portal Management */ + /* ----------------- */ +/** + * qman_get_portal_config - get portal configuration settings + * + * This returns a read-only view of the current cpu's affine portal settings. + */ +const struct qman_portal_config *qman_get_portal_config(void); + +/** + * qman_irqsource_get - return the portal work that is interrupt-driven + * + * Returns a bitmask of QM_PIRQ_**I processing sources that are currently + * enabled for interrupt handling on the current cpu's affine portal. These + * sources will trigger the portal interrupt and the interrupt handler (or a + * tasklet/bottom-half it defers to) will perform the corresponding processing + * work. The qman_poll_***() functions will only process sources that are not in + * this bitmask. If the current CPU is sharing a portal hosted on another CPU, + * this always returns zero. + */ +u32 qman_irqsource_get(void); + +/** + * qman_irqsource_add - add processing sources to be interrupt-driven + * @bits: bitmask of QM_PIRQ_**I processing sources + * + * Adds processing sources that should be interrupt-driven (rather than + * processed via qman_poll_***() functions). Returns zero for success, or + * -EINVAL if the current CPU is sharing a portal hosted on another CPU. + */ +int qman_irqsource_add(u32 bits); + +/** + * qman_irqsource_remove - remove processing sources from being interrupt-driven + * @bits: bitmask of QM_PIRQ_**I processing sources + * + * Removes processing sources from being interrupt-driven, so that they will + * instead be processed via qman_poll_***() functions. Returns zero for success, + * or -EINVAL if the current CPU is sharing a portal hosted on another CPU. + */ +int qman_irqsource_remove(u32 bits); + +/** + * qman_affine_cpus - return a mask of cpus that have affine portals + */ +const cpumask_t *qman_affine_cpus(void); + +/** + * qman_affine_channel - return the channel ID of an portal + * @cpu: the cpu whose affine portal is the subject of the query + * + * If @cpu is -1, the affine portal for the current CPU will be used. It is a + * bug to call this function for any value of @cpu (other than -1) that is not a + * member of the mask returned from qman_affine_cpus(). + */ +u16 qman_affine_channel(int cpu); + +/** + * qman_poll_dqrr - process DQRR (fast-path) entries + * @limit: the maximum number of DQRR entries to process + * + * Use of this function requires that DQRR processing not be interrupt-driven. + * Ie. the value returned by qman_irqsource_get() should not include + * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU, + * this function will return -EINVAL, otherwise the return value is >=0 and + * represents the number of DQRR entries processed. + */ +int qman_poll_dqrr(unsigned int limit); + +/** + * qman_poll_slow - process anything (except DQRR) that isn't interrupt-driven. + * + * This function does any portal processing that isn't interrupt-driven. 
If the + * current CPU is sharing a portal hosted on another CPU, this function will + * return (u32)-1, otherwise the return value is a bitmask of QM_PIRQ_* sources + * indicating what interrupt sources were actually processed by the call. + */ +u32 qman_poll_slow(void); + +/** + * qman_poll - legacey wrapper for qman_poll_dqrr() and qman_poll_slow() + * + * Dispatcher logic on a cpu can use this to trigger any maintenance of the + * affine portal. There are two classes of portal processing in question; + * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking + * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR + * thresholds, congestion state changes, etc). This function does whatever + * processing is not triggered by interrupts. + * + * Note, if DQRR and some slow-path processing are poll-driven (rather than + * interrupt-driven) then this function uses a heuristic to determine how often + * to run slow-path processing - as slow-path processing introduces at least a + * minimum latency each time it is run, whereas fast-path (DQRR) processing is + * close to zero-cost if there is no work to be done. Applications can tune this + * behaviour themselves by using qman_poll_dqrr() and qman_poll_slow() directly + * rather than going via this wrapper. + */ +void qman_poll(void); + +/** + * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal + * + * Disables DQRR processing of the portal. This is reference-counted, so + * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to + * truly re-enable dequeuing. + */ +void qman_stop_dequeues(void); + +/** + * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal + * + * Enables DQRR processing of the portal. This is reference-counted, so + * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to + * truly re-enable dequeuing. + */ +void qman_start_dequeues(void); + +/** + * qman_static_dequeue_add - Add pool channels to the portal SDQCR + * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n) + * + * Adds a set of pool channels to the portal's static dequeue command register + * (SDQCR). The requested pools are limited to those the portal has dequeue + * access to. + */ +void qman_static_dequeue_add(u32 pools); + +/** + * qman_static_dequeue_del - Remove pool channels from the portal SDQCR + * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n) + * + * Removes a set of pool channels from the portal's static dequeue command + * register (SDQCR). The requested pools are limited to those the portal has + * dequeue access to. + */ +void qman_static_dequeue_del(u32 pools); + +/** + * qman_static_dequeue_get - return the portal's current SDQCR + * + * Returns the portal's current static dequeue command register (SDQCR). The + * entire register is returned, so if only the currently-enabled pool channels + * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK. + */ +u32 qman_static_dequeue_get(void); + +/** + * qman_dca - Perform a Discrete Consumption Acknowledgement + * @dq: the DQRR entry to be consumed + * @park_request: indicates whether the held-active @fq should be parked + * + * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had + * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this + * does not take a 'portal' argument but implies the core affine portal from the + * cpu that is currently executing the function. 
For reasons of locking, this + * function must be called from the same CPU as that which processed the DQRR + * entry in the first place. + */ +void qman_dca(struct qm_dqrr_entry *dq, int park_request); + +/** + * qman_eqcr_is_empty - Determine if portal's EQCR is empty + * + * For use in situations where a cpu-affine caller needs to determine when all + * enqueues for the local portal have been processed by Qman but can't use the + * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue(). + * The function forces tracking of EQCR consumption (which normally doesn't + * happen until enqueue processing needs to find space to put new enqueue + * commands), and returns zero if the ring still has unprocessed entries, + * non-zero if it is empty. + */ +int qman_eqcr_is_empty(void); + +/** + * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications + * @handler: callback for processing DCP ERNs + * @affine: whether this handler is specific to the locally affine portal + * + * If a hardware block's interface to Qman (ie. its direct-connect portal, or + * DCP) is configured not to receive enqueue rejections, then any enqueues + * through that DCP that are rejected will be sent to a given software portal. + * If @affine is non-zero, then this handler will only be used for DCP ERNs + * received on the portal affine to the current CPU. If multiple CPUs share a + * portal and they all call this function, they will be setting the handler for + * the same portal! If @affine is zero, then this handler will be global to all + * portals handled by this instance of the driver. Only those portals that do + * not have their own affine handler will use the global handler. + */ +void qman_set_dc_ern(qman_cb_dc_ern handler, int affine); + + /* FQ management */ + /* ------------- */ +/** + * qman_create_fq - Allocates a FQ + * @fqid: the index of the FQD to encapsulate, must be "Out of Service" + * @flags: bit-mask of QMAN_FQ_FLAG_*** options + * @fq: memory for storing the 'fq', with callbacks filled in + * + * Creates a frame queue object for the given @fqid, unless the + * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is + * dynamically allocated (or the function fails if none are available). Once + * created, the caller should not touch the memory at 'fq' except as extended to + * adjacent memory for user-defined fields (see the definition of "struct + * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to + * pre-existing frame-queues that aren't to be otherwise interfered with, it + * prevents all other modifications to the frame queue. The TO_DCPORTAL flag + * causes the driver to honour any contextB modifications requested in the + * qm_init_fq() API, as this indicates the frame queue will be consumed by a + * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by + * software portals, the contextB field is controlled by the driver and can't be + * modified by the caller. If the AS_IS flag is specified, management commands + * will be used on portal @p to query state for frame queue @fqid and construct + * a frame queue object based on that, rather than assuming/requiring that it be + * Out of Service. 
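+ *
+ * Illustrative sketch (editor's addition; the callback names are hypothetical
+ * placeholders, error handling is omitted, and 0 is passed as @fqid on the
+ * assumption that it is ignored when QMAN_FQ_FLAG_DYNAMIC_FQID is set):
+ *
+ *	struct qman_fq *fq = kzalloc(sizeof(*fq), GFP_KERNEL);
+ *	fq->cb.dqrr = my_dqrr_cb;
+ *	fq->cb.ern = my_ern_cb;
+ *	fq->cb.fqs = my_fqs_cb;
+ *	if (qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq))
+ *		;	// no FQID was available, or another error occurred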
+ */ +int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq); + +/** + * qman_destroy_fq - Deallocates a FQ + * @fq: the frame queue object to release + * @flags: bit-mask of QMAN_FQ_FREE_*** options + * + * The memory for this frame queue object ('fq' provided in qman_create_fq()) is + * not deallocated but the caller regains ownership, to do with as desired. The + * FQ must be in the 'out-of-service' state unless the QMAN_FQ_FREE_PARKED flag + * is specified, in which case it may also be in the 'parked' state. + */ +void qman_destroy_fq(struct qman_fq *fq, u32 flags); + +/** + * qman_fq_fqid - Queries the frame queue ID of a FQ object + * @fq: the frame queue object to query + */ +u32 qman_fq_fqid(struct qman_fq *fq); + +/** + * qman_fq_state - Queries the state of a FQ object + * @fq: the frame queue object to query + * @state: pointer to state enum to return the FQ scheduling state + * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask + * + * Queries the state of the FQ object, without performing any h/w commands. + * This captures the state, as seen by the driver, at the time the function + * executes. + */ +void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags); + +/** + * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled" + * @fq: the frame queue object to modify, must be 'parked' or new. + * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options + * @opts: the FQ-modification settings, as defined in the low-level API + * + * The @opts parameter comes from the low-level portal API. Select + * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled + * rather than parked. NB, @opts can be NULL. + * + * Note that some fields and options within @opts may be ignored or overwritten + * by the driver; + * 1. the 'count' and 'fqid' fields are always ignored (this operation only + * affects one frame queue: @fq). + * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated + * 'fqd' structure's 'context_b' field are sometimes overwritten; + * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is + * initialised to a value used by the driver for demux. + * - if context_b is initialised for demux, so is context_a in case stashing + * is requested (see item 4). + * (So caller control of context_b is only possible for TO_DCPORTAL frame queue + * objects.) + * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's + * 'dest::channel' field will be overwritten to match the portal used to issue + * the command. If the WE_DESTWQ write-enable bit had already been set by the + * caller, the channel workqueue will be left as-is, otherwise the write-enable + * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag + * isn't set, the destination channel/workqueue fields and the write-enable bit + * are left as-is. + * 4. if the driver overwrites context_a/b for demux, then if + * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite + * context_a.address fields and will leave the stashing fields provided by the + * user alone, otherwise it will zero out the context_a.stashing fields. + */ +int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts); + +/** + * qman_schedule_fq - Schedules a FQ + * @fq: the frame queue object to schedule, must be 'parked' + * + * Schedules the frame queue, which must be Parked, which takes it to + * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level. 
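+ *
+ * Illustrative sketch (editor's addition; error handling omitted): a common
+ * pattern is to initialise a newly-created FQ parked and schedule it later;
+ *
+ *	qman_init_fq(fq, 0, NULL);	// no INITFQ_FLAG_SCHED, FQ stays parked
+ *	// ... any other setup ...
+ *	qman_schedule_fq(fq);		// parked -> (tentatively/truly) scheduled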
+ */
+int qman_schedule_fq(struct qman_fq *fq);
+
+/**
+ * qman_retire_fq - Retires a FQ
+ * @fq: the frame queue object to retire
+ * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
+ *
+ * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
+ * the retirement was started asynchronously, otherwise it returns negative for
+ * failure. When this function returns zero, @flags is set to indicate whether
+ * the retired FQ is empty and/or whether it has any ORL fragments (to show up
+ * as ERNs). Otherwise the corresponding flags will be known when a subsequent
+ * FQRN message shows up on the portal's message ring.
+ *
+ * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
+ * Active state), the completion will be via the message ring as a FQRN - but
+ * the corresponding callback may occur before this function returns!! Ie. the
+ * caller should be prepared to accept the callback as the function is called,
+ * not only once it has returned.
+ */
+int qman_retire_fq(struct qman_fq *fq, u32 *flags);
+
+/**
+ * qman_oos_fq - Puts a FQ "out of service"
+ * @fq: the frame queue object to be put out-of-service, must be 'retired'
+ *
+ * The frame queue must be retired and empty, and if any order restoration list
+ * was released as ERNs at the time of retirement, they must all be consumed.
+ */
+int qman_oos_fq(struct qman_fq *fq);
+
+/**
+ * qman_fq_flow_control - Set the XON/XOFF state of a FQ
+ * @fq: the frame queue object to be set to XON/XOFF state; must not be in the
+ * 'oos', 'retired' or 'parked' state
+ * @xon: boolean to set the FQ in XON or XOFF state
+ *
+ * The frame queue should be in the Tentatively Scheduled or Truly Scheduled
+ * state, otherwise the IFSI interrupt will be asserted.
+ */
+int qman_fq_flow_control(struct qman_fq *fq, int xon);
+
+/**
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
+
+/**
+ * qman_query_fq_np - Queries non-programmable FQD fields
+ * @fq: the frame queue object to be queried
+ * @np: storage for the queried FQD fields
+ */
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
+
+/**
+ * qman_query_wq - Queries work queue lengths
+ * @query_dedicated: If non-zero, query the length of WQs in the channel
+ * dedicated to this software portal. Otherwise, query the length of WQs in
+ * the channel specified in @wq.
+ * @wq: storage for the queried WQ lengths. Also specifies the channel to
+ * query if @query_dedicated is zero.
+ */
+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
+
+/**
+ * qman_volatile_dequeue - Issue a volatile dequeue command
+ * @fq: the frame queue object to dequeue from
+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
+ *
+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
+ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
+ * the VDQCR is already in use, otherwise returns non-zero for failure. If
+ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
+ * the VDQCR command has finished executing (ie. once the callback for the last
+ * DQRR entry resulting from the VDQCR command has been called).
If not using + * the FINISH flag, completion can be determined either by detecting the + * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits + * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue + * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the + * "flags" retrieved from qman_fq_state(). + */ +int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr); + +/** + * qman_enqueue - Enqueue a frame to a frame queue + * @fq: the frame queue object to enqueue to + * @fd: a descriptor of the frame to be enqueued + * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options + * + * Fills an entry in the EQCR of portal @qm to enqueue the frame described by + * @fd. The descriptor details are copied from @fd to the EQCR entry, the 'pid' + * field is ignored. The return value is non-zero on error, such as ring full + * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR + * specified), etc. If the ring is full and FLAG_WAIT is specified, this + * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal + * interrupt will assert when Qman consumes the EQCR entry (subject to "status + * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, Qman will + * perform an implied "discrete consumption acknowledgement" on the dequeue + * ring's (DQRR) entry, at the ring index specified by the FLAG_DCA_IDX(x) + * macro. (As an alternative to issuing explicit DCA actions on DQRR entries, + * this implicit DCA can delay the release of a "held active" frame queue + * corresponding to a DQRR entry until Qman consumes the EQCR entry - providing + * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is + * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption + * acknowledgement should "park request" the "held active" frame queue. Ie. + * when the portal eventually releases that frame queue, it will be left in the + * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the + * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag + * is requested, and the FQ is a member of a congestion group, then this + * function returns -EAGAIN if the congestion group is currently congested. + * Note, this does not eliminate ERNs, as the async interface means we can be + * sending enqueue commands to an un-congested FQ that becomes congested before + * the enqueue commands are processed, but it does minimise needless thrashing + * of an already busy hardware resource by throttling many of the to-be-dropped + * enqueues "at the source". + */ +int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags); + +/** + * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP + * @fq: the frame queue object to enqueue to + * @fd: a descriptor of the frame to be enqueued + * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options + * @orp: the frame queue object used as an order restoration point. + * @orp_seqnum: the sequence number of this frame in the order restoration path + * + * Similar to qman_enqueue(), but with the addition of an Order Restoration + * Point (@orp) and corresponding sequence number (@orp_seqnum) for this + * enqueue operation to employ order restoration. 
Each frame queue object acts + * as an Order Definition Point (ODP) by providing each frame dequeued from it + * with an incrementing sequence number, this value is generally ignored unless + * that sequence of dequeued frames will need order restoration later. Each + * frame queue object also encapsulates an Order Restoration Point (ORP), which + * is a re-assembly context for re-ordering frames relative to their sequence + * numbers as they are enqueued. The ORP does not have to be within the frame + * queue that receives the enqueued frame, in fact it is usually the frame + * queue from which the frames were originally dequeued. For the purposes of + * order restoration, multiple frames (or "fragments") can be enqueued for a + * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all + * enqueues except the final fragment of a given sequence number. Ordering + * between sequence numbers is guaranteed, even if fragments of different + * sequence numbers are interlaced with one another. Fragments of the same + * sequence number will retain the order in which they are enqueued. If no + * enqueue is to performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given + * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been + * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given + * sequence number should become the ORP's "Next Expected Sequence Number". + * + * Side note: a frame queue object can be used purely as an ORP, without + * carrying any frames at all. Care should be taken not to deallocate a frame + * queue object that is being actively used as an ORP, as a future allocation + * of the frame queue object may start using the internal ORP before the + * previous use has finished. + */ +int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags, + struct qman_fq *orp, u16 orp_seqnum); + +/** + * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs + * @result: is set by the API to the base FQID of the allocated range + * @count: the number of FQIDs required + * @align: required alignment of the allocated range + * @partial: non-zero if the API can return fewer than @count FQIDs + * + * Returns the number of frame queues allocated, or a negative error code. If + * @partial is non zero, the allocation request may return a smaller range of + * FQs than requested (though alignment will be as requested). If @partial is + * zero, the return value will either be 'count' or negative. + */ +int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial); +static inline int qman_alloc_fqid(u32 *result) +{ + int ret = qman_alloc_fqid_range(result, 1, 0, 0); + return (ret > 0) ? 0 : ret; +} + +/** + * qman_release_fqid_range - Release the specified range of frame queue IDs + * @fqid: the base FQID of the range to deallocate + * @count: the number of FQIDs in the range + * + * This function can also be used to seed the allocator with ranges of FQIDs + * that it can subsequently allocate from. 
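+ *
+ * Illustrative sketch (editor's addition; the FQID range used here is purely
+ * hypothetical): seeding the allocator and then allocating a single FQID;
+ *
+ *	u32 fqid;
+ *	qman_release_fqid_range(0x400, 256);	// seed 256 FQIDs into allocator
+ *	if (!qman_alloc_fqid(&fqid)) {
+ *		// ... use 'fqid', eg. pass it to qman_create_fq() ...
+ *		qman_release_fqid(fqid);
+ *	}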
+ */ +void qman_release_fqid_range(u32 fqid, unsigned int count); +static inline void qman_release_fqid(u32 fqid) +{ + qman_release_fqid_range(fqid, 1); +} + + /* Pool-channel management */ + /* ----------------------- */ +/** + * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs + * @result: is set by the API to the base pool-channel ID of the allocated range + * @count: the number of pool-channel IDs required + * @align: required alignment of the allocated range + * @partial: non-zero if the API can return fewer than @count + * + * Returns the number of pool-channel IDs allocated, or a negative error code. + * If @partial is non zero, the allocation request may return a smaller range of + * than requested (though alignment will be as requested). If @partial is zero, + * the return value will either be 'count' or negative. + */ +int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial); +static inline int qman_alloc_pool(u32 *result) +{ + int ret = qman_alloc_pool_range(result, 1, 0, 0); + return (ret > 0) ? 0 : ret; +} + +/** + * qman_release_pool_range - Release the specified range of pool-channel IDs + * @id: the base pool-channel ID of the range to deallocate + * @count: the number of pool-channel IDs in the range + */ +void qman_release_pool_range(u32 id, unsigned int count); +static inline void qman_release_pool(u32 id) +{ + qman_release_pool_range(id, 1); +} + + + /* CGR management */ + /* -------------- */ +/** + * qman_create_cgr - Register a congestion group object + * @cgr: the 'cgr' object, with fields filled in + * @flags: QMAN_CGR_FLAG_* values + * @opts: optional state of CGR settings + * + * Registers this object to receiving congestion entry/exit callbacks on the + * portal affine to the cpu portal on which this API is executed. If opts is + * NULL then only the callback (cgr->cb) function is registered. If @flags + * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset + * any unspecified parameters) will be used rather than a modify hw hardware + * (which only modifies the specified parameters). + */ +int qman_create_cgr(struct qman_cgr *cgr, u32 flags, + struct qm_mcc_initcgr *opts); + +/** + * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal + * @cgr: the 'cgr' object, with fields filled in + * @flags: QMAN_CGR_FLAG_* values + * @dcp_portal: the DCP portal to which the cgr object is registered. + * @opts: optional state of CGR settings + * + */ +int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal, + struct qm_mcc_initcgr *opts); + +/** + * qman_delete_cgr - Deregisters a congestion group object + * @cgr: the 'cgr' object to deregister + * + * "Unplugs" this CGR object from the portal affine to the cpu on which this API + * is executed. This must be excuted on the same affine portal on which it was + * created. + */ +int qman_delete_cgr(struct qman_cgr *cgr); + +/** + * qman_modify_cgr - Modify CGR fields + * @cgr: the 'cgr' object to modify + * @flags: QMAN_CGR_FLAG_* values + * @opts: the CGR-modification settings + * + * The @opts parameter comes from the low-level portal API, and can be NULL. + * Note that some fields and options within @opts may be ignored or overwritten + * by the driver, in particular the 'cgrid' field is ignored (this operation + * only affects the given CGR object). 
If @flags contains + * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any + * unspecified parameters) will be used rather than a modify hw hardware (which + * only modifies the specified parameters). + */ +int qman_modify_cgr(struct qman_cgr *cgr, u32 flags, + struct qm_mcc_initcgr *opts); + +/** +* qman_query_cgr - Queries CGR fields +* @cgr: the 'cgr' object to query +* @result: storage for the queried congestion group record +*/ +int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result); + +/** + * qman_query_congestion - Queries the state of all congestion groups + * @congestion: storage for the queried state of all congestion groups + */ +int qman_query_congestion(struct qm_mcr_querycongestion *congestion); + +/** + * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs + * @result: is set by the API to the base CGR ID of the allocated range + * @count: the number of CGR IDs required + * @align: required alignment of the allocated range + * @partial: non-zero if the API can return fewer than @count + * + * Returns the number of CGR IDs allocated, or a negative error code. + * If @partial is non zero, the allocation request may return a smaller range of + * than requested (though alignment will be as requested). If @partial is zero, + * the return value will either be 'count' or negative. + */ +int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial); +static inline int qman_alloc_cgrid(u32 *result) +{ + int ret = qman_alloc_cgrid_range(result, 1, 0, 0); + return (ret > 0) ? 0 : ret; +} + +/** + * qman_release_cgrid_range - Release the specified range of CGR IDs + * @id: the base CGR ID of the range to deallocate + * @count: the number of CGR IDs in the range + */ +void qman_release_cgrid_range(u32 id, unsigned int count); +static inline void qman_release_cgrid(u32 id) +{ + qman_release_cgrid_range(id, 1); +} + + /* Helpers */ + /* ------- */ +/** + * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS + * @fqid: the FQID that will be initialised by other s/w + * + * In many situations, a FQID is provided for communication between s/w + * entities, and whilst the consumer is responsible for initialising and + * scheduling the FQ, the producer(s) generally create a wrapper FQ object using + * and only call qman_enqueue() (no FQ initialisation, scheduling, etc). Ie; + * qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...); + * However, data can not be enqueued to the FQ until it is initialised out of + * the OOS state - this function polls for that condition. It is particularly + * useful for users of IPC functions - each endpoint's Rx FQ is the other + * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ object + * and then use this API on the (NO_MODIFY) Tx FQ object in order to + * synchronise. The function returns zero for success, +1 if the FQ is still in + * the OOS state, or negative if there was an error. + */ +static inline int qman_poll_fq_for_init(struct qman_fq *fq) +{ + struct qm_mcr_queryfq_np np; + int err; + err = qman_query_fq_np(fq, &np); + if (err) + return err; + if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS) + return 1; + return 0; +} + + /* -------------- */ + /* CEETM :: types */ + /* -------------- */ +/** + * Token Rate Structure + * Shaping rates are based on a "credit" system and a pre-configured h/w + * internal timer. The following type represents a shaper "rate" parameter as a + * fractional number of "tokens". Here's how it works. 
This (fractional) number + * of tokens is added to the shaper's "credit" every time the h/w timer elapses + * (up to a limit which is set by another shaper parameter). Every time a frame + * is enqueued through a shaper, the shaper deducts as many tokens as there are + * bytes of data in the enqueued frame. A shaper will not allow itself to + * enqueue any frames if its token count is negative. As such; + * + * The rate at which data is enqueued is limited by the + * rate at which tokens are added. + * + * Therefore if the user knows the period between these h/w timer updates in + * seconds, they can calculate the maximum traffic rate of the shaper (in + * bytes-per-second) from the token rate. And vice versa, they can calculate + * the token rate to use in order to achieve a given traffic rate. + */ +struct qm_ceetm_rate { + /* The token rate is; whole + (fraction/8192) */ + u32 whole:11; /* 0..2047 */ + u32 fraction:13; /* 0..8191 */ +}; + +struct qm_ceetm_weight_code { + /* The weight code is; 5 msbits + 3 lsbits */ + u8 y:5; + u8 x:3; +}; + +struct qm_ceetm { + unsigned int idx; + struct list_head sub_portals; + struct list_head lnis; + unsigned int sp_range[2]; + unsigned int lni_range[2]; +}; + +struct qm_ceetm_sp { + struct list_head node; + unsigned int idx; + unsigned int dcp_idx; + int is_claimed; + struct qm_ceetm_lni *lni; +}; + +/* Logical Network Interface */ +struct qm_ceetm_lni { + struct list_head node; + unsigned int idx; + unsigned int dcp_idx; + int is_claimed; + struct qm_ceetm_sp *sp; + struct list_head channels; + u8 shaper_enable; + u8 shaper_couple; + struct qm_ceetm_rate cr_token_rate; + struct qm_ceetm_rate er_token_rate; + u16 cr_token_bucket_limit; + u16 er_token_bucket_limit; +}; + +/* Class Queue Channel */ +struct qm_ceetm_channel { + struct list_head node; + unsigned int idx; + unsigned int lni_idx; + unsigned int dcp_idx; + struct list_head class_queues; + struct list_head ccgs; + u8 shaper_enable; + u8 shaper_couple; + struct qm_ceetm_rate cr_token_rate; + struct qm_ceetm_rate er_token_rate; + u16 cr_token_bucket_limit; + u16 er_token_bucket_limit; +}; + +struct qm_ceetm_ccg; + +/* This callback type is used when handling congestion entry/exit. The + * 'cb_ctx' value is the opaque value associated with ccg object. + * 'congested' is non-zero on congestion-entry, and zero on congestion-exit. + */ +typedef void (*qman_cb_ccgr)(struct qm_ceetm_ccg *ccg, void *cb_ctx, + int congested); + +/* Class Congestion Group */ +struct qm_ceetm_ccg { + struct qm_ceetm_channel *parent; + struct list_head node; + qman_cb_ccgr cb; + void *cb_ctx; + unsigned int idx; +}; + +/* Class Queue */ +struct qm_ceetm_cq { + struct qm_ceetm_channel *parent; + struct qm_ceetm_ccg *ccg; + struct list_head node; + unsigned int idx; + int is_claimed; + struct list_head bound_lfqids; + struct list_head binding_node; +}; + +/* Logical Frame Queue */ +struct qm_ceetm_lfq { + struct qm_ceetm_channel *parent; + struct list_head node; + unsigned int idx; + unsigned int dctidx; + u64 context_a; + u32 context_b; + qman_cb_mr ern; +}; + +/* Divide 'n' by 'd', rounding down if 'r' is negative, rounding up if + * it's positive, and rounding to the closest value if it's zero. NB, + * this macro assumes no particular type, so feed it with types that are + * appropriate for its use. NB, these arguments should not be expressions + * unless it is safe for them to be evaluated multiple times. Eg. do not + * pass in "some_value++" as a parameter to the macro! */ +#define ROUNDING(n, d, r) \ + (((r) < 0) ? 
((n) / (d)) : \ + (((r) > 0) ? (((n) + (d) - 1) / (d)) : \ + (((n) + ((d) / 2)) / (d)))) + +/** + * qman_ceetm_bps2tokenrate - Given a desired rate 'bps' measured in bps + * (ie. bits-per-second), compute the 'token_rate' fraction that best + * approximates that rate. + * @bps: the desired shaper rate in bps. + * @token_rate: the output token rate computed with the given kbps. + * @rounding: dictates how to round if an exact conversion is not possible; if + * it is negative then 'token_rate' will round down to the highest value that + * does not exceed the desired rate, if it is positive then 'token_rate' will + * round up to the lowest value that is greater than or equal to the desired + * rate, and if it is zero then it will round to the nearest approximation, + * whether that be up or down. + * + * Return 0 for success, or -EINVAL if prescaler or qman clock is not available. + */ +int qman_ceetm_bps2tokenrate(u32 bps, + struct qm_ceetm_rate *token_rate, + int rounding); + +/** + * qman_ceetm_tokenrate2bps - Given a 'token_rate', compute the + * corresponding number of 'bps'. + * @token_rate: the input desired token_rate fraction. + * @bps: the output shaper rate in bps computed with the give token rate. + * @rounding: has the same semantics as the previous function. + * + * Return 0 for success, or -EINVAL if prescaler or qman clock is not available. + */ +int qman_ceetm_tokenrate2bps(const struct qm_ceetm_rate *token_rate, + u32 *bps, + int rounding); + +int qman_alloc_ceetm0_channel_range(u32 *result, u32 count, u32 align, + int partial); +static inline int qman_alloc_ceetm0_channel(u32 *result) +{ + int ret = qman_alloc_ceetm0_channel_range(result, 1, 0, 0); + return (ret > 0) ? 0 : ret; +} +void qman_release_ceetm0_channel_range(u32 channelid, u32 count); +static inline void qman_release_ceetm0_channelid(u32 channelid) +{ + qman_release_ceetm0_channel_range(channelid, 1); +} + +int qman_alloc_ceetm1_channel_range(u32 *result, u32 count, u32 align, + int partial); +static inline int qman_alloc_ceetm1_channel(u32 *result) +{ + int ret = qman_alloc_ceetm1_channel_range(result, 1, 0, 0); + return (ret > 0) ? 0 : ret; +} +void qman_release_ceetm1_channel_range(u32 channelid, u32 count); +static inline void qman_release_ceetm1_channelid(u32 channelid) +{ + qman_release_ceetm1_channel_range(channelid, 1); +} + +int qman_alloc_ceetm0_lfqid_range(u32 *result, u32 count, u32 align, + int partial); +static inline int qman_alloc_ceetm0_lfqid(u32 *result) +{ + int ret = qman_alloc_ceetm0_lfqid_range(result, 1, 0, 0); + return (ret > 0) ? 0 : ret; +} +void qman_release_ceetm0_lfqid_range(u32 lfqid, u32 count); +static inline void qman_release_ceetm0_lfqid(u32 lfqid) +{ + qman_release_ceetm0_lfqid_range(lfqid, 1); +} + +int qman_alloc_ceetm1_lfqid_range(u32 *result, u32 count, u32 align, + int partial); +static inline int qman_alloc_ceetm1_lfqid(u32 *result) +{ + int ret = qman_alloc_ceetm1_lfqid_range(result, 1, 0, 0); + return (ret > 0) ? 0 : ret; +} +void qman_release_ceetm1_lfqid_range(u32 lfqid, u32 count); +static inline void qman_release_ceetm1_lfqid(u32 lfqid) +{ + qman_release_ceetm1_lfqid_range(lfqid, 1); +} + /* ----------------------------- */ + /* CEETM :: sub-portals */ + /* ----------------------------- */ + +/** + * qman_ceetm_claim_sp - Claims the given sub-portal, provided it is available + * to us and configured for traffic-management. + * @sp: the returned sub-portal object, if successful. 
+ * @dcp_idx: specifies the desired Fman block (and thus the relevant CEETM
+ * instance).
+ * @sp_idx: is the desired sub-portal index from 0 to 15.
+ *
+ * Returns zero for success, or -ENODEV if the sub-portal is in use, or -EINVAL
+ * if @sp_idx is out of range.
+ *
+ * Note that if there are multiple driver domains (eg. a linux kernel versus
+ * user-space drivers in USDPAA, or multiple guests running under a hypervisor)
+ * then a sub-portal may be accessible by more than one instance of a qman
+ * driver and so it may be claimed multiple times. If this is the case, it is
+ * up to the system architect to prevent conflicting configuration actions
+ * coming from the different driver domains. The qman drivers do not have any
+ * behind-the-scenes coordination to prevent this from happening.
+ */
+int qman_ceetm_sp_claim(struct qm_ceetm_sp **sp,
+	enum qm_dc_portal dcp_idx,
+	unsigned int sp_idx);
+
+/**
+ * qman_ceetm_sp_release - Releases a previously claimed sub-portal.
+ * @sp: the sub-portal to be released.
+ *
+ * Returns 0 for success, or -EBUSY for failure if the dependencies are not
+ * released.
+ */
+int qman_ceetm_sp_release(struct qm_ceetm_sp *sp);
+
+	/* ----------------------------------- */
+	/* CEETM :: logical network interfaces */
+	/* ----------------------------------- */
+
+/**
+ * qman_ceetm_lni_claim - Claims an unclaimed LNI.
+ * @lni: the returned LNI object, if successful.
+ * @dcp_id: specifies the desired Fman block (and thus the relevant CEETM
+ * instance)
+ * @lni_idx: is the desired LNI index.
+ *
+ * Returns zero for success, or -EINVAL on failure, which will happen if the LNI
+ * is not available or has already been claimed (and not yet successfully
+ * released), or @lni_idx is out of range.
+ *
+ * Note that there may be multiple driver domains (or instances) that need to
+ * transmit out the same LNI, so this claim is only guaranteeing exclusivity
+ * within the domain of the driver being called. See qman_ceetm_sp_claim() and
+ * qman_ceetm_sp_get_lni() for more information.
+ */
+int qman_ceetm_lni_claim(struct qm_ceetm_lni **lni,
+	enum qm_dc_portal dcp_id,
+	unsigned int lni_idx);
+
+/**
+ * qman_ceetm_lni_release - Releases a previously claimed LNI.
+ * @lni: the LNI to be released.
+ *
+ * This will only succeed if all dependent objects have been released.
+ * Returns zero for success, or -EBUSY if the dependencies are not released.
+ */
+int qman_ceetm_lni_release(struct qm_ceetm_lni *lni);
+
+/**
+ * qman_ceetm_sp_set_lni
+ * qman_ceetm_sp_get_lni - Set/get the LNI that the sub-portal is currently
+ * mapped to.
+ * @sp: the given sub-portal.
+ * @lni (in "set" function): the LNI object which the sp will be mapped to.
+ * @lni_idx (in "get" function): the LNI index which the sp is mapped to.
+ *
+ * Returns zero for success. The "set" function returns -EINVAL if this sp-lni
+ * mapping has already been set or if the configure mapping command returns
+ * error; the "get" function returns -EINVAL if this sp-lni mapping is not set
+ * or if the query mapping command returns error.
+ *
+ * This may be useful in situations where multiple driver domains have access
+ * to the same sub-portals in order to all be able to transmit out the same
+ * physical interface (perhaps they're on different IP addresses or VPNs, so
+ * Fman is splitting Rx traffic and here we need to converge Tx traffic). In
+ * that case, a control-plane is likely to use qman_ceetm_lni_claim() followed
+ * by qman_ceetm_sp_set_lni() to configure the sub-portal, and other domains
+ * are likely to use qman_ceetm_sp_get_lni() followed by qman_ceetm_lni_claim()
+ * in order to determine the LNI that the control-plane had assigned. This is
+ * why the "get" returns an index, whereas the "set" takes an (already claimed)
+ * LNI object.
+ */
+int qman_ceetm_sp_set_lni(struct qm_ceetm_sp *sp,
+	struct qm_ceetm_lni *lni);
+int qman_ceetm_sp_get_lni(struct qm_ceetm_sp *sp,
+	unsigned int *lni_idx);
+
+/**
+ * qman_ceetm_lni_enable_shaper
+ * qman_ceetm_lni_disable_shaper - Enables/disables shaping on the LNI.
+ * @lni: the given LNI.
+ * @coupled: indicates whether CR and ER are coupled.
+ * @oal: the overhead accounting length which is added to the actual length of
+ * each frame when performing shaper calculations.
+ *
+ * When the number of (unused) committed-rate tokens reaches the committed-rate
+ * token limit, 'coupled' indicates whether surplus tokens should be added to
+ * the excess-rate token count (up to the excess-rate token limit).
+ * When an LNI is claimed, its shaper is disabled by default. The enable
+ * function turns on the shaper for this LNI.
+ * Whenever a claimed LNI is first enabled for shaping, its committed and
+ * excess token rates and limits are zero, so they will need to be changed to
+ * do anything useful. The shaper can subsequently be enabled/disabled without
+ * resetting the shaping parameters, but the shaping parameters will be reset
+ * when the LNI is released.
+ *
+ * Returns zero for success. The "enable" function fails with:
+ * a) -EINVAL if the shaper is already enabled,
+ * b) -EIO if the configure shaper command returns error.
+ * The "disable" function fails with:
+ * a) -EINVAL if the shaper has already been disabled,
+ * b) -EIO if the configure shaper command returns error.
+ */
+int qman_ceetm_lni_enable_shaper(struct qm_ceetm_lni *lni, int coupled,
+	int oal);
+int qman_ceetm_lni_disable_shaper(struct qm_ceetm_lni *lni);
+
+/**
+ * qman_ceetm_lni_set_commit_rate
+ * qman_ceetm_lni_get_commit_rate
+ * qman_ceetm_lni_set_excess_rate
+ * qman_ceetm_lni_get_excess_rate - Set/get the shaper CR/ER token rate and
+ * token limit for the given LNI.
+ * @lni: the given LNI.
+ * @token_rate: the desired token rate for "set" function, or the token rate of
+ * the LNI queried by "get" function.
+ * @token_limit: the desired token bucket limit for "set" function, or the token
+ * limit of the given LNI queried by "get" function.
+ *
+ * Returns zero for success. The "set" function returns -EINVAL if the given
+ * LNI is unshaped or -EIO if the configure shaper command returns error.
+ * The "get" function returns -EINVAL if the token rate or the token limit is
+ * not set or the query command returns error.
+ */
+int qman_ceetm_lni_set_commit_rate(struct qm_ceetm_lni *lni,
+	const struct qm_ceetm_rate *token_rate,
+	u16 token_limit);
+int qman_ceetm_lni_get_commit_rate(struct qm_ceetm_lni *lni,
+	struct qm_ceetm_rate *token_rate,
+	u16 *token_limit);
+int qman_ceetm_lni_set_excess_rate(struct qm_ceetm_lni *lni,
+	const struct qm_ceetm_rate *token_rate,
+	u16 token_limit);
+int qman_ceetm_lni_get_excess_rate(struct qm_ceetm_lni *lni,
+	struct qm_ceetm_rate *token_rate,
+	u16 *token_limit);
+
+/**
+ * qman_ceetm_lni_set_tcfcc
+ * qman_ceetm_lni_get_tcfcc - Configure/query "Traffic Class Flow Control".
+ * @lni: the given LNI.
+ * @cq_level: is between 0 and 15, representing individual class queue levels + * (CQ0 to CQ7 for every channel) and grouped class queue levels (CQ8 to CQ15 + * for every channel). + * @traffic_class: is between 0 and 7 when associating a given class queue level + * to a traffic class, or -1 when disabling traffic class flow control for this + * class queue level. + * + * Return zero for success, or -EINVAL if the cq_level or traffic_class is out + * of range as indicated above, or -EIO if the configure/query tcfcc command + * returns error. + * + * Refer to the section of QMan CEETM traffic class flow control in the Refernce + * Manual. + */ +int qman_ceetm_lni_set_tcfcc(struct qm_ceetm_lni *lni, + unsigned int cq_level, + int traffic_class); +int qman_ceetm_lni_get_tcfcc(struct qm_ceetm_lni *lni, + unsigned int cq_level, + int *traffic_class); + + /* ----------------------------- */ + /* CEETM :: class queue channels */ + /* ----------------------------- */ + +/** + * qman_ceetm_channel_claim - Claims an unclaimed CQ channel that is mapped to + * the given LNI. + * @channel: the returned class queue channel object, if successful. + * @lni: the LNI that the channel belongs to. + * + * Channels are always initially "unshaped". + * + * Return zero for success, or -ENODEV if there is no channel available(all 32 + * channels are claimed) or -EINVAL if the channel mapping command returns + * error. + */ +int qman_ceetm_channel_claim(struct qm_ceetm_channel **channel, + struct qm_ceetm_lni *lni); + +/** + * qman_ceetm_channel_release - Releases a previously claimed CQ channel. + * @channel: the channel needs to be released. + * + * Returns zero for success, or -EBUSY if the dependencies are still in use. + * + * Note any shaping of the channel will be cleared to leave it in an unshaped + * state. + */ +int qman_ceetm_channel_release(struct qm_ceetm_channel *channel); + +/** + * qman_ceetm_channel_enable_shaper + * qman_ceetm_channel_disable_shaper - Enables/disables shaping on the channel. + * @channel: the given channel. + * @coupled: indicates whether surplus CR tokens should be added to the + * excess-rate token count (up to the excess-rate token limit) when the number + * of (unused) committed-rate tokens reach the committed_rate token limit. + * + * Whenever a claimed channel is first enabled for shaping, its committed and + * excess token rates and limits are zero, so will need to be changed to do + * anything useful. The shaper can subsequently be enabled/disabled without + * resetting the shaping parameters, but the shaping parameters will be reset + * when the channel is released. + * + * Return 0 for success, or -EINVAL for failure, in the case that the channel + * shaper has been enabled/disabled or the management command returns error. + */ +int qman_ceetm_channel_enable_shaper(struct qm_ceetm_channel *channel, + int coupled); +int qman_ceetm_channel_disable_shaper(struct qm_ceetm_channel *channel); + +/** + * qman_ceetm_channel_set_commit_rate + * qman_ceetm_channel_get_commit_rate + * qman_ceetm_channel_set_excess_rate + * qman_ceetm_channel_get_excess_rate - Set/get channel CR/ER shaper parameters. + * @channel: the given channel. + * @token_rate: the desired token rate for "set" function, or the queried token + * rate for "get" function. + * @token_limit: the desired token limit for "set" function, or the queried + * token limit for "get" function. + * + * Return zero for success. 
The "set" function returns -EINVAL if the channel + * is unshaped, or -EIO if the configure shapper command returns error. The + * "get" function returns -EINVAL if token rate of token limit is not set, or + * the query shaper command returns error. + */ +int qman_ceetm_channel_set_commit_rate(struct qm_ceetm_channel *channel, + const struct qm_ceetm_rate *token_rate, + u16 token_limit); +int qman_ceetm_channel_get_commit_rate(struct qm_ceetm_channel *channel, + struct qm_ceetm_rate *token_rate, + u16 *token_limit); +int qman_ceetm_channel_set_excess_rate(struct qm_ceetm_channel *channel, + const struct qm_ceetm_rate *token_rate, + u16 token_limit); +int qman_ceetm_channel_get_excess_rate(struct qm_ceetm_channel *channel, + struct qm_ceetm_rate *token_rate, + u16 *token_limit); + +/** + * qman_ceetm_channel_set_weight + * qman_ceetm_channel_get_weight - Set/get the weight for unshaped channel + * @channel: the given channel. + * @token_limit: the desired token limit as the weight of the unshaped channel + * for "set" function, or the queried token limit for "get" function. + * + * The algorithm of unshaped fair queuing (uFQ) is used for unshaped channel. + * It allows the unshaped channels to be included in the CR time eligible list, + * and thus use the configured CR token limit value as their fair queuing + * weight. + * + * Return zero for success, or -EINVAL if the channel is a shaped channel or + * the management command returns error. + */ +int qman_ceetm_channel_set_weight(struct qm_ceetm_channel *channel, + u16 token_limit); +int qman_ceetm_channel_get_weight(struct qm_ceetm_channel *channel, + u16 *token_limit); + +/** + * qman_ceetm_channel_set_group + * qman_ceetm_channel_get_group - Set/get the grouping of the class scheduler. + * @channel: the given channel. + * @group_b: indicates whether there is group B in this channel. + * @prio_a: the priority of group A. + * @prio_b: the priority of group B. + * + * There are 8 individual class queues (CQ0-CQ7), and 8 grouped class queues + * (CQ8-CQ15). If 'group_b' is zero, then all the grouped class queues are in + * group A, otherwise they are split into group A (CQ8-11) and group B + * (CQ12-C15). The individual class queues and the group(s) are in strict + * priority order relative to each other. Within the group(s), the scheduling + * is not strict priority order, but the result of scheduling within a group + * is in strict priority order relative to the other class queues in the + * channel. 'prio_a' and 'prio_b' control the priority order of the groups + * relative to the individual class queues, and take values from 0-7. Eg. if + * 'group_b' is non-zero, 'prio_a' is 2 and 'prio_b' is 6, then the strict + * priority order would be; + * CQ0, CQ1, CQ2, GROUPA, CQ3, CQ4, CQ5, CQ6, GROUPB, CQ7 + * + * Return 0 for success. For "set" function, returns -EINVAL if prio_a or + * prio_b are out of the range 1 - 7 (priority of group A or group B can not + * be 0, CQ0 is always the highest class queue in this channel.), or -EIO if + * the configure scheduler command returns error. For "get" function, return + * -EINVAL if the query scheduler command returns error. 
+ */ +int qman_ceetm_channel_set_group(struct qm_ceetm_channel *channel, + int group_b, + unsigned int prio_a, + unsigned int prio_b); +int qman_ceetm_channel_get_group(struct qm_ceetm_channel *channel, + int *group_b, + unsigned int *prio_a, + unsigned int *prio_b); + + /* --------------------- */ + /* CEETM :: class queues */ + /* --------------------- */ + +/** + * qman_ceetm_cq_claim - Claims an individual class queue. + * @cq: the returned class queue object, if successful. + * @channel: the class queue channel. + * @idx: is from 0 to 7 (representing CQ0 to CQ7). + * @ccg: represents the class congestion group that this class queue should be + * subscribed to, or NULL if no congestion group membership is desired. + * + * Returns zero for success, or -EINVAL if @idx is out of range 0 - 7 or + * if this class queue has been claimed, or configure class queue command + * returns error, or returns -ENOMEM if allocating CQ memory fails. + */ +int qman_ceetm_cq_claim(struct qm_ceetm_cq **cq, + struct qm_ceetm_channel *channel, + unsigned int idx, + struct qm_ceetm_ccg *ccg); + +/** + * qman_ceetm_cq_claim_A - Claims a class queue group A. + * @cq: the returned class queue object, if successful. + * @channel: the class queue channel. + * @idx: is from 8 to 15 if only group A exits, otherwise, it is from 8 to 11. + * @ccg: represents the class congestion group that this class queue should be + * subscribed to, or NULL if no congestion group membership is desired. + * + * Return zero for success, or -EINVAL if @idx is out the range or if + * this class queue has been claimed or configure class queue command returns + * error, or returns -ENOMEM if allocating CQ memory fails. + */ +int qman_ceetm_cq_claim_A(struct qm_ceetm_cq **cq, + struct qm_ceetm_channel *channel, + unsigned int idx, + struct qm_ceetm_ccg *ccg); + +/** + * qman_ceetm_cq_claim_B - Claims a class queue group B. + * @cq: the returned class queue object, if successful. + * @channel: the class queue channel. + * @idx: is from 0 to 3 (CQ12 to CQ15). + * @ccg: represents the class congestion group that this class queue should be + * subscribed to, or NULL if no congestion group membership is desired. + * + * Return zero for success, or -EINVAL if @idx is out the range or if + * this class queue has been claimed or configure class queue command returns + * error, or returns -ENOMEM if allocating CQ memory fails. + */ +int qman_ceetm_cq_claim_B(struct qm_ceetm_cq **cq, + struct qm_ceetm_channel *channel, + unsigned int idx, + struct qm_ceetm_ccg *ccg); + +/** + * qman_ceetm_cq_release - Releases a previously claimed class queue. + * @cq: The class queue to be released. + * + * Return zero for success, or -EBUSY if the dependent objects (eg. logical + * FQIDs) have not been released. + */ +int qman_ceetm_cq_release(struct qm_ceetm_cq *cq); + +/** + * qman_ceetm_set_queue_weight + * qman_ceetm_get_queue_weight - Configure/query the weight of a grouped class + * queue. + * @cq: the given class queue. + * @weight_code: the desired weight code to set for the given class queue for + * "set" function or the queired weight code for "get" function. + * + * Grouped class queues have a default weight code of zero, which corresponds to + * a scheduler weighting of 1. This function can be used to modify a grouped + * class queue to another weight, (Use the helpers qman_ceetm_wbfs2ratio() + * and qman_ceetm_ratio2wbfs() to convert between these 'weight_code' values + * and the corresponding sharing weight.) 
+/* Weights are encoded using a pseudo-exponential scheme. The weight codes 0,
+ * 32, 64, [...] correspond to weights of 1, 2, 4, [...]. The weights
+ * corresponding to intermediate weight codes are calculated using linear
+ * interpolation on the inverted values. Put another way, the inverse weights
+ * for every 32nd weight code are 1, 1/2, 1/4, [...], and the intervals
+ * between these are divided linearly into 32 intermediate values, the inverses
+ * of which form the remaining weight codes.
+ *
+ * The Weighted Bandwidth Fair Scheduling (WBFS) algorithm provides a form of
+ * scheduling within a group of class queues (group A or B). Weights are used
+ * to normalise the class queues to an underlying BFS algorithm where all class
+ * queues are assumed to require "equal bandwidth". So the weights referred to
+ * by the weight codes act as divisors on the size of frames being enqueued.
+ * I.e. if one class queue in a group is assigned a weight of 2 whilst the
+ * other class queues in the group keep the default weight of 1, then the WBFS
+ * scheduler will effectively treat all frames enqueued on the weight-2 class
+ * queue as having half the number of bytes they really have. I.e. all other
+ * things being equal, that class queue would get twice the bytes-per-second
+ * bandwidth of the others. So weights should be chosen to provide bandwidth
+ * ratios between members of the same class queue group. These weights have no
+ * bearing on behaviour outside that group's WBFS mechanism though.
+ */
+
+/**
+ * qman_ceetm_wbfs2ratio - Given a weight code ('wbfs'), provide an accurate
+ * fractional representation of the corresponding weight (in order not to lose
+ * any precision).
+ * @weight_code: The given weight code in WBFS.
+ * @numerator: the numerator part of the weight computed by the weight code.
+ * @denominator: the denominator part of the weight computed by the weight
+ * code.
+ *
+ * Returns zero for success, or -EINVAL if the given weight code is illegal.
+ */
+int qman_ceetm_wbfs2ratio(unsigned int weight_code,
+        u32 *numerator,
+        u32 *denominator);
+/**
+ * qman_ceetm_ratio2wbfs - Given a weight, find the nearest possible weight
+ * code. If the user needs to know how close this is, convert the resulting
+ * weight code back to a weight and compare.
+ * @numerator: numerator part of the given weight.
+ * @denominator: denominator part of the given weight.
+ * @weight_code: the weight code computed from the given weight.
+ *
+ * Returns zero for success, or -ERANGE if "numerator/denominator" is outside
+ * the range of weights.
+ */
+int qman_ceetm_ratio2wbfs(u32 numerator,
+        u32 denominator,
+        unsigned int *weight_code,
+        int rounding);
+
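As an illustration of the two conversion helpers (a sketch only, with the same <linux/fsl_qman.h> assumption as before, plus <linux/printk.h> for pr_info(); the 'rounding' argument is passed as 0 purely for demonstration, so consult the implementation for its exact semantics):

static int example_pick_weight_code(u32 num, u32 den, unsigned int *code)
{
        u32 got_num, got_den;
        int ret;

        /* Find the nearest weight code for the requested num/den weight */
        ret = qman_ceetm_ratio2wbfs(num, den, code, 0);
        if (ret)
                return ret;

        /* Convert back to see how close the approximation is */
        ret = qman_ceetm_wbfs2ratio(*code, &got_num, &got_den);
        if (ret)
                return ret;
        pr_info("weight %u/%u -> code %u -> %u/%u\n",
                num, den, *code, got_num, got_den);
        return 0;
}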
+#define QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER 0x1
+/**
+ * qman_ceetm_cq_get_dequeue_statistics - Get the statistics provided by CEETM
+ * CQ counters.
+ * @cq: the given CQ object.
+ * @flags: indicates whether the statistics counter will be cleared after query.
+ * @frame_count: the number of frames that have been counted since the counter
+ * was last cleared.
+ * @byte_count: the number of bytes in all frames that have been counted.
+ *
+ * Return zero for success, or -EINVAL if the query statistics command returns
+ * error.
+ */
+int qman_ceetm_cq_get_dequeue_statistics(struct qm_ceetm_cq *cq, u32 flags,
+        u64 *frame_count, u64 *byte_count);
+
+        /* ---------------------- */
+        /* CEETM :: logical FQIDs */
+        /* ---------------------- */
+/**
+ * qman_ceetm_lfq_claim - Claims an unused logical FQID and associates it with
+ * the given class queue.
+ * @lfq: the returned lfq object, if successful.
+ * @cq: the class queue which needs to claim an LFQID.
+ *
+ * Return zero for success, or -ENODEV if no LFQID is available, or -ENOMEM if
+ * allocating memory for the lfq fails, or -EINVAL if configuring the LFQMT
+ * fails.
+ */
+int qman_ceetm_lfq_claim(struct qm_ceetm_lfq **lfq,
+        struct qm_ceetm_cq *cq);
+
+/**
+ * qman_ceetm_lfq_release - Releases a previously claimed logical FQID.
+ * @lfq: the lfq to be released.
+ *
+ * Return zero for success.
+ */
+int qman_ceetm_lfq_release(struct qm_ceetm_lfq *lfq);
+
+/**
+ * qman_ceetm_lfq_set_context
+ * qman_ceetm_lfq_get_context - Set/get the context_a/context_b pair in the
+ * "dequeue context table" associated with the logical FQID.
+ * @lfq: the given logical FQ object.
+ * @context_a: contextA of the dequeue context.
+ * @context_b: contextB of the dequeue context.
+ *
+ * Returns zero for success, or -EINVAL if setting/getting the context pair
+ * fails.
+ */
+int qman_ceetm_lfq_set_context(struct qm_ceetm_lfq *lfq,
+        u64 context_a,
+        u32 context_b);
+int qman_ceetm_lfq_get_context(struct qm_ceetm_lfq *lfq,
+        u64 *context_a,
+        u32 *context_b);
+
+/**
+ * qman_ceetm_create_fq - Initialise an FQ object for the LFQ.
+ * @lfq: the given logical FQ.
+ * @fq: the FQ object created for the given logical FQ.
+ *
+ * The FQ object can be used in qman_enqueue() and qman_enqueue_orp() APIs to
+ * target a logical FQID (and the class queue it is associated with).
+ * Note that this FQ object can only be used for enqueues, and
+ * in the case of qman_enqueue_orp() it cannot be used as the 'orp' parameter,
+ * only as 'fq'. This FQ object cannot (and shouldn't) be destroyed; it is only
+ * valid as long as the underlying 'lfq' remains claimed. It is the user's
+ * responsibility to ensure that the underlying 'lfq' is not released until any
+ * enqueues to this FQ object have completed. The only field the user needs to
+ * fill in is fq->cb.ern, as that enqueue rejection handler is the callback
+ * that could conceivably be called on this FQ object. This API can be called
+ * multiple times to create multiple FQ objects referring to the same logical
+ * FQID, and any enqueue rejections will respect the callback of the object
+ * that issued the enqueue (and will identify the object via the parameter
+ * passed to the callback too). There is no 'flags' parameter to this API as
+ * there is for qman_create_fq() - the created FQ object behaves as though
+ * qman_create_fq() had been called with the single flag
+ * QMAN_FQ_FLAG_NO_MODIFY.
+ *
+ * Returns 0 for success.
+ */
+int qman_ceetm_create_fq(struct qm_ceetm_lfq *lfq, struct qman_fq *fq);
+
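A short sketch of the logical-FQID flow described above (illustrative only, with the same header assumption as the earlier examples): it claims an LFQ for an already-claimed class queue, programs placeholder dequeue-context values, and initialises a caller-allocated FQ object whose cb.ern handler is assumed to have been set by the caller.

static int example_setup_lfq(struct qm_ceetm_cq *cq,
                             struct qm_ceetm_lfq **lfq,
                             struct qman_fq *fq)
{
        int ret;

        ret = qman_ceetm_lfq_claim(lfq, cq);
        if (ret)
                return ret;

        /* Placeholder dequeue-context values, for illustration only */
        ret = qman_ceetm_lfq_set_context(*lfq, 0, 0);
        if (ret) {
                qman_ceetm_lfq_release(*lfq);
                return ret;
        }

        /* 'fq' is caller-allocated; fq->cb.ern should already point at the
         * caller's enqueue-rejection handler, as described above. */
        ret = qman_ceetm_create_fq(*lfq, fq);
        if (ret)
                qman_ceetm_lfq_release(*lfq);
        return ret;
}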
+        /* -------------------------------- */
+        /* CEETM :: class congestion groups */
+        /* -------------------------------- */
+
+/**
+ * qman_ceetm_ccg_claim - Claims an unused CCG.
+ * @ccg: the returned CCG object, if successful.
+ * @channel: the given class queue channel.
+ * @idx: the index of the CCG within the channel.
+ * @cscn: the callback function of this CCG.
+ * @cb_ctx: the corresponding context to be used if state change notifications
+ * are later enabled for this CCG.
+ *
+ * The congestion group is local to the given class queue channel, so only
+ * class queues within the channel can be associated with that congestion
+ * group. The association of class queues to congestion groups occurs when the
+ * class queues are claimed, see qman_ceetm_cq_claim() and related functions.
+ * Congestion groups are in a "zero" state when initially claimed, and they are
+ * returned to that state when released.
+ *
+ * Return zero for success, or -EINVAL if no CCG in the channel is available.
+ */
+int qman_ceetm_ccg_claim(struct qm_ceetm_ccg **ccg,
+        struct qm_ceetm_channel *channel,
+        unsigned int idx,
+        void (*cscn)(struct qm_ceetm_ccg *,
+                void *cb_ctx,
+                int congested),
+        void *cb_ctx);
+
+/**
+ * qman_ceetm_ccg_release - Releases a previously claimed CCG.
+ * @ccg: the given ccg.
+ *
+ * Returns zero for success, or -EBUSY if the given ccg's dependent objects
+ * (class queues that are associated with the CCG) have not been released.
+ */
+int qman_ceetm_ccg_release(struct qm_ceetm_ccg *ccg);
+
+/* This struct is used to specify attributes for a CCG. The 'we_mask' field
+ * controls which CCG attributes are to be updated, and the remainder specify
+ * the values for those attributes. A CCG counts either frames or the bytes
+ * within those frames, but not both ('mode'). A CCG can optionally cause
+ * enqueues to be rejected, due to tail-drop or WRED, or both (they are
+ * independent options, 'td_en' and 'wr_en_g,wr_en_y,wr_en_r'). Tail-drop can
+ * be level-triggered due to a single threshold ('td_thres') or edge-triggered
+ * due to a "congestion state", but not both ('td_mode'). Congestion state has
+ * distinct entry and exit thresholds ('cs_thres_in' and 'cs_thres_out'), and
+ * notifications can be sent to software when the CCG goes into and out of this
+ * congested state ('cscn_en'). */
+struct qm_ceetm_ccg_params {
+        /* Boolean fields together in a single bitfield struct */
+        struct {
+                /* Whether to count bytes or frames. 1==frames */
+                int mode:1;
+                /* En/disable tail-drop. 1==enable */
+                int td_en:1;
+                /* Tail-drop on congestion-state or threshold. 1=threshold */
+                int td_mode:1;
+                /* Generate congestion state change notifications. 1==enable */
+                int cscn_en:1;
+                /* Enable WRED rejections (per colour). 1==enable */
+                int wr_en_g:1;
+                int wr_en_y:1;
+                int wr_en_r:1;
+        } __packed;
+        /* Tail-drop threshold. See qm_cgr_thres_[gs]et64(). */
+        struct qm_cgr_cs_thres td_thres;
+        /* Congestion state thresholds, for entry and exit. */
+        struct qm_cgr_cs_thres cs_thres_in;
+        struct qm_cgr_cs_thres cs_thres_out;
+        /* Overhead accounting length. Per-packet "tax", from -128 to +127 */
+        signed char oal;
+        /* Congestion state change notification for DCP portal, virtual CCGID */
+        /* WRED parameters. */
+        struct qm_cgr_wr_parm wr_parm_g;
+        struct qm_cgr_wr_parm wr_parm_y;
+        struct qm_cgr_wr_parm wr_parm_r;
+};
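Since the shape of the CSCN callback is given directly by the qman_ceetm_ccg_claim() prototype above, a brief illustrative sketch follows (not part of the patch; header assumptions as in the earlier examples, with <linux/printk.h> for pr_info(), and CCG index 0 chosen arbitrarily):

static void example_cscn(struct qm_ceetm_ccg *ccg, void *cb_ctx, int congested)
{
        /* Invoked on entry to / exit from the congested state once CSCNs are
         * enabled for this CCG (see 'cscn_en' above). */
        pr_info("CCG %p: congestion %s\n", ccg,
                congested ? "entered" : "exited");
}

static int example_claim_ccg(struct qm_ceetm_channel *channel,
                             struct qm_ceetm_ccg **ccg)
{
        /* CCGs are local to the channel; index 0 is just an example */
        return qman_ceetm_ccg_claim(ccg, channel, 0, example_cscn, NULL);
}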
+/* Bits used in 'we_mask' to qman_ceetm_ccg_set(); controls which attributes of
+ * the CCGR are to be updated.
+ */
+#define QM_CCGR_WE_CDV 0x0000 /* cdv */
+#define QM_CCGR_WE_MODE 0x0001 /* mode (bytes/frames) */
+#define QM_CCGR_WE_CS_THRES_IN 0x0002 /* congestion state entry threshold */
+#define QM_CCGR_WE_TD_EN 0x0004 /* congestion state tail-drop enable */
+#define QM_CCGR_WE_CSCN_TUPD 0x0008 /* CSCN target update */
+#define QM_CCGR_WE_CSCN_EN 0x0010 /* congestion notification enable */
+#define QM_CCGR_WE_WR_EN_R 0x0020 /* WRED enable - red */
+#define QM_CCGR_WE_WR_EN_Y 0x0040 /* WRED enable - yellow */
+#define QM_CCGR_WE_WR_EN_G 0x0080 /* WRED enable - green */
+#define QM_CCGR_WE_WR_PARM_R 0x0100 /* WRED parameters - red */
+#define QM_CCGR_WE_WR_PARM_Y 0x0200 /* WRED parameters - yellow */
+#define QM_CCGR_WE_WR_PARM_G 0x0400 /* WRED parameters - green */
+#define QM_CCGR_WE_OAL 0x0800 /* overhead accounting length */
+#define QM_CCGR_WE_CS_THRES_OUT 0x1000 /* congestion state exit threshold */
+#define QM_CCGR_WE_TD_THRES 0x2000 /* tail-drop threshold */
+#define QM_CCGR_WE_TD_MODE 0x4000 /* tail-drop mode (state/threshold) */
+
+/**
+ * qman_ceetm_ccg_set
+ * qman_ceetm_ccg_get - Configure/query a subset of CCG attributes.
+ * @ccg: the given CCG object.
+ * @we_mask: the write enable mask.
+ * @params: the parameter settings for this CCG.
+ *
+ * Return 0 for success, or -EIO if the configure CCG command returns error for
+ * the "set" function, or -EINVAL if the query CCG command returns error for
+ * the "get" function.
+ */
+int qman_ceetm_ccg_set(struct qm_ceetm_ccg *ccg,
+        u16 we_mask,
+        const struct qm_ceetm_ccg_params *params);
+int qman_ceetm_ccg_get(struct qm_ceetm_ccg *ccg,
+        struct qm_ceetm_ccg_params *params);
+
+/** qman_ceetm_cscn_swp_set - Add or remove a software portal from the target
+ * mask.
+ * qman_ceetm_cscn_swp_get - Query whether a given software portal index is
+ * in the CSCN target mask.
+ * @ccg: the given CCG object.
+ * @swp_idx: the index of the software portal.
+ * @cscn_enabled: 1: set the SWP as a CSCN target. 0: remove the SWP from
+ * the target mask.
+ * @we_mask: the write enable mask.
+ * @params: the parameter settings for this CCG.
+ *
+ * Return 0 for success, or -EINVAL if the command in the set/get function
+ * fails.
+ */
+int qman_ceetm_cscn_swp_set(struct qm_ceetm_ccg *ccg,
+        u16 swp_idx,
+        unsigned int cscn_enabled,
+        u16 we_mask,
+        const struct qm_ceetm_ccg_params *params);
+int qman_ceetm_cscn_swp_get(struct qm_ceetm_ccg *ccg,
+        u16 swp_idx,
+        unsigned int *cscn_enabled);
+
+/** qman_ceetm_cscn_dcp_set - Add or remove a direct connect portal from the
+ * target mask.
+ * qman_ceetm_cscn_dcp_get - Query whether a given direct connect portal index
+ * is in the CSCN target mask.
+ * @ccg: the given CCG object.
+ * @dcp_idx: the index of the direct connect portal.
+ * @vcgid: congestion state change notification for the DCP portal, virtual
+ * CGID.
+ * @cscn_enabled: 1: set the DCP as a CSCN target. 0: remove the DCP from
+ * the target mask.
+ * @we_mask: the write enable mask.
+ * @params: the parameter settings for this CCG.
+ *
+ * Return 0 for success, or -EINVAL if the command in the set/get function
+ * fails.
+ */
+int qman_ceetm_cscn_dcp_set(struct qm_ceetm_ccg *ccg,
+        u16 dcp_idx,
+        u8 vcgid,
+        unsigned int cscn_enabled,
+        u16 we_mask,
+        const struct qm_ceetm_ccg_params *params);
+int qman_ceetm_cscn_dcp_get(struct qm_ceetm_ccg *ccg,
+        u16 dcp_idx,
+        u8 *vcgid,
+        unsigned int *cscn_enabled);
+
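A brief sketch of updating a claimed CCG with a write-enable mask (illustrative only, same header assumptions as above, plus <linux/string.h> for memset(); the chosen attributes are arbitrary, and threshold programming is omitted because the qm_cgr_cs_thres encoding is defined elsewhere in this header):

static int example_config_ccg(struct qm_ceetm_ccg *ccg)
{
        struct qm_ceetm_ccg_params params;
        u16 we_mask = QM_CCGR_WE_MODE | QM_CCGR_WE_CSCN_EN;

        memset(&params, 0, sizeof(params));
        params.mode = 1;        /* count frames rather than bytes */
        params.cscn_en = 1;     /* deliver congestion state change notifications */

        /* Only the attributes named in 'we_mask' are updated */
        return qman_ceetm_ccg_set(ccg, we_mask, &params);
}

qman_ceetm_cscn_swp_set() and qman_ceetm_cscn_dcp_set() follow the same pattern, adding the target portal index (and, for the DCP case, the virtual CGID) ahead of the mask and parameters.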
+/**
+ * qman_ceetm_ccg_get_reject_statistics - Get the statistics provided by
+ * CEETM CCG counters.
+ * @ccg: the given CCG object.
+ * @flags: indicates whether the statistics counter will be cleared after query.
+ * @frame_count: the number of frames that have been counted since the counter
+ * was last cleared.
+ * @byte_count: the number of bytes in all frames that have been counted.
+ *
+ * Return zero for success, or -EINVAL if the query statistics command returns
+ * error.
+ */
+int qman_ceetm_ccg_get_reject_statistics(struct qm_ceetm_ccg *ccg, u32 flags,
+        u64 *frame_count, u64 *byte_count);
+
+/**
+ * qman_set_wpm - Set waterfall power management
+ *
+ * @wpm_enable: boolean, 1 = enable wpm, 0 = disable wpm.
+ *
+ * Return 0 for success, or -ENODEV if the QMan misc_cfg register is not
+ * accessible.
+ */
+int qman_set_wpm(int wpm_enable);
+
+/**
+ * qman_get_wpm - Query the waterfall power management setting
+ *
+ * @wpm_enable: returns the current setting; 1 = wpm enabled, 0 = wpm disabled.
+ *
+ * Return 0 for success, or -ENODEV if the QMan misc_cfg register is not
+ * accessible.
+ */
+int qman_get_wpm(int *wpm_enable);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FSL_QMAN_H */
diff --git a/include/linux/fsl_usdpaa.h b/include/linux/fsl_usdpaa.h
new file mode 100644
index 0000000..0d2ed54
--- /dev/null
+++ b/include/linux/fsl_usdpaa.h
@@ -0,0 +1,53 @@
+/* Copyright 2011-2012 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef FSL_USDPAA_H
+#define FSL_USDPAA_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __KERNEL__
+/* This interface is needed in a few places and, though it's not specific to
+ * USDPAA as such, creating a new header for it doesn't make any sense. The
+ * qbman kernel driver implements this interface and uses it as the backend for
+ * both the FQID and BPID allocators.
+ */
+struct dpa_alloc {
+        struct list_head list;
+        spinlock_t lock;
+};
+#define DECLARE_DPA_ALLOC(name) \
+        struct dpa_alloc name = { \
+                .list = { \
+                        .prev = &name.list, \
+                        .next = &name.list \
+                }, \
+                .lock = __SPIN_LOCK_UNLOCKED(name.lock) \
+        }
+static inline void dpa_alloc_init(struct dpa_alloc *alloc)
+{
+        INIT_LIST_HEAD(&alloc->list);
+        spin_lock_init(&alloc->lock);
+}
+int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align,
+        int partial);
+void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count);
+/* Like 'new', but specifies the desired range; returns -ENOMEM if the entire
+ * desired range is not available, or 0 for success. */
+int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base_id, u32 count);
+/* Pops and returns contiguous ranges from the allocator. Returns -ENOMEM when
+ * 'alloc' is empty. */
+int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count);
+#endif /* __KERNEL__ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FSL_USDPAA_H */
--
cgit v0.10.2
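As a closing illustration of the dpa_alloc interface added in fsl_usdpaa.h above, here is a kernel-side sketch (not part of the patch). It assumes, as the driver's use of this interface for the FQID/BPID allocators suggests, that dpa_alloc_free() seeds or returns a range of IDs to the allocator and dpa_alloc_new() then hands out 'count' IDs with the requested alignment; the ID range and the treatment of the return value are illustrative assumptions.

#include <linux/fsl_usdpaa.h>

static DECLARE_DPA_ALLOC(example_ids);

static int example_get_id(u32 *id)
{
        static bool seeded;
        int ret;

        if (!seeded) {
                /* Seed the allocator with IDs 0x100..0x1ff (example range) */
                dpa_alloc_free(&example_ids, 0x100, 0x100);
                seeded = true;
        }
        /* Request one ID, alignment 1, no partial allocation; treat any
         * non-negative return as success for the purposes of this sketch. */
        ret = dpa_alloc_new(&example_ids, id, 1, 1, 0);
        return ret < 0 ? ret : 0;
}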