Diffstat (limited to 'drivers')
-rw-r--r--  drivers/atm/solos-pci.c | 6
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 1
-rw-r--r--  drivers/net/dsa/mv88e6xxx.c | 230
-rw-r--r--  drivers/net/dsa/mv88e6xxx.h | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 330
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 13
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 865
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 62
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 16
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 138
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 200
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 65
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 162
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 27
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_values.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 11
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 13
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 756
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 178
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 82
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 588
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/Makefile | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/Kconfig | 8
-rw-r--r--  drivers/net/ethernet/freescale/fman/Makefile | 7
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.c | 2871
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.h | 325
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_dtsec.c | 1453
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_dtsec.h | 59
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_mac.h | 278
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.c | 1170
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.h | 60
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_muram.c | 158
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_muram.h | 51
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_port.c | 1778
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_port.h | 151
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_sp.c | 166
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_sp.h | 103
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_tgec.c | 786
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_tgec.h | 55
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 978
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.h | 97
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 3
-rw-r--r--  drivers/net/ethernet/ibm/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/ibm/Makefile | 1
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 4
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 3585
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 1046
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 50
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_mbx.h | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 28
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 61
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 66
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.h | 15
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | 2
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_tlv.h | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_type.h | 9
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 44
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_vf.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 59
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 9
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 46
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 68
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 7
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c | 250
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h | 10
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c | 10
-rw-r--r--  drivers/net/geneve.c | 17
-rw-r--r--  drivers/net/phy/marvell.c | 135
-rw-r--r--  drivers/net/phy/micrel.c | 96
-rw-r--r--  drivers/net/usb/asix_common.c | 2
-rw-r--r--  drivers/net/vxlan.c | 9
88 files changed, 18000 insertions(+), 2031 deletions(-)
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 0c2b4ba0..6ac2b2b 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -525,7 +525,7 @@ struct geos_gpio_attr {
static ssize_t geos_gpio_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
struct solos_card *card = pci_get_drvdata(pdev);
uint32_t data32;
@@ -551,7 +551,7 @@ static ssize_t geos_gpio_store(struct device *dev, struct device_attribute *attr
static ssize_t geos_gpio_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
struct solos_card *card = pci_get_drvdata(pdev);
uint32_t data32;
@@ -565,7 +565,7 @@ static ssize_t geos_gpio_show(struct device *dev, struct device_attribute *attr,
static ssize_t hardware_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
struct solos_card *card = pci_get_drvdata(pdev);
uint32_t data32;
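
Note: the three hunks above are a pure readability cleanup. to_pci_dev() is defined in <linux/pci.h> as container_of(dev, struct pci_dev, dev), so the generated code is identical. A minimal standalone sketch of the container_of() idiom, with hypothetical struct names (demo_card, demo_dev) used for illustration only:

/* Simplified container_of(): the kernel version adds extra type checks. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_dev { int id; };
struct demo_card {
        int slot;
        struct demo_dev dev;    /* embedded, like struct pci_dev::dev */
};

int main(void)
{
        struct demo_card card = { .slot = 3, .dev = { .id = 7 } };
        struct demo_dev *d = &card.dev;

        /* Recover the containing structure from the embedded member. */
        struct demo_card *c = container_of(d, struct demo_card, dev);
        printf("slot=%d id=%d\n", c->slot, c->dev.id);
        return 0;
}
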
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index f4ae720..313dbac 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -42,7 +42,6 @@
#include <net/bonding.h>
-#define to_dev(obj) container_of(obj, struct device, kobj)
#define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd))))
/* "show" function for the bond_masters attribute.
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 75e245c..9fe33fc 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -617,98 +617,112 @@ static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
}
static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
- { "in_good_octets", 8, 0x00, },
- { "in_bad_octets", 4, 0x02, },
- { "in_unicast", 4, 0x04, },
- { "in_broadcasts", 4, 0x06, },
- { "in_multicasts", 4, 0x07, },
- { "in_pause", 4, 0x16, },
- { "in_undersize", 4, 0x18, },
- { "in_fragments", 4, 0x19, },
- { "in_oversize", 4, 0x1a, },
- { "in_jabber", 4, 0x1b, },
- { "in_rx_error", 4, 0x1c, },
- { "in_fcs_error", 4, 0x1d, },
- { "out_octets", 8, 0x0e, },
- { "out_unicast", 4, 0x10, },
- { "out_broadcasts", 4, 0x13, },
- { "out_multicasts", 4, 0x12, },
- { "out_pause", 4, 0x15, },
- { "excessive", 4, 0x11, },
- { "collisions", 4, 0x1e, },
- { "deferred", 4, 0x05, },
- { "single", 4, 0x14, },
- { "multiple", 4, 0x17, },
- { "out_fcs_error", 4, 0x03, },
- { "late", 4, 0x1f, },
- { "hist_64bytes", 4, 0x08, },
- { "hist_65_127bytes", 4, 0x09, },
- { "hist_128_255bytes", 4, 0x0a, },
- { "hist_256_511bytes", 4, 0x0b, },
- { "hist_512_1023bytes", 4, 0x0c, },
- { "hist_1024_max_bytes", 4, 0x0d, },
- /* Not all devices have the following counters */
- { "sw_in_discards", 4, 0x110, },
- { "sw_in_filtered", 2, 0x112, },
- { "sw_out_filtered", 2, 0x113, },
-
+ { "in_good_octets", 8, 0x00, BANK0, },
+ { "in_bad_octets", 4, 0x02, BANK0, },
+ { "in_unicast", 4, 0x04, BANK0, },
+ { "in_broadcasts", 4, 0x06, BANK0, },
+ { "in_multicasts", 4, 0x07, BANK0, },
+ { "in_pause", 4, 0x16, BANK0, },
+ { "in_undersize", 4, 0x18, BANK0, },
+ { "in_fragments", 4, 0x19, BANK0, },
+ { "in_oversize", 4, 0x1a, BANK0, },
+ { "in_jabber", 4, 0x1b, BANK0, },
+ { "in_rx_error", 4, 0x1c, BANK0, },
+ { "in_fcs_error", 4, 0x1d, BANK0, },
+ { "out_octets", 8, 0x0e, BANK0, },
+ { "out_unicast", 4, 0x10, BANK0, },
+ { "out_broadcasts", 4, 0x13, BANK0, },
+ { "out_multicasts", 4, 0x12, BANK0, },
+ { "out_pause", 4, 0x15, BANK0, },
+ { "excessive", 4, 0x11, BANK0, },
+ { "collisions", 4, 0x1e, BANK0, },
+ { "deferred", 4, 0x05, BANK0, },
+ { "single", 4, 0x14, BANK0, },
+ { "multiple", 4, 0x17, BANK0, },
+ { "out_fcs_error", 4, 0x03, BANK0, },
+ { "late", 4, 0x1f, BANK0, },
+ { "hist_64bytes", 4, 0x08, BANK0, },
+ { "hist_65_127bytes", 4, 0x09, BANK0, },
+ { "hist_128_255bytes", 4, 0x0a, BANK0, },
+ { "hist_256_511bytes", 4, 0x0b, BANK0, },
+ { "hist_512_1023bytes", 4, 0x0c, BANK0, },
+ { "hist_1024_max_bytes", 4, 0x0d, BANK0, },
+ { "sw_in_discards", 4, 0x10, PORT, },
+ { "sw_in_filtered", 2, 0x12, PORT, },
+ { "sw_out_filtered", 2, 0x13, PORT, },
+ { "in_discards", 4, 0x00 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "in_filtered", 4, 0x01 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "in_accepted", 4, 0x02 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "in_bad_accepted", 4, 0x03 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "in_good_avb_class_a", 4, 0x04 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "in_good_avb_class_b", 4, 0x05 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "in_bad_avb_class_a", 4, 0x06 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "in_bad_avb_class_b", 4, 0x07 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "tcam_counter_0", 4, 0x08 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "tcam_counter_1", 4, 0x09 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "tcam_counter_2", 4, 0x0a | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "tcam_counter_3", 4, 0x0b | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "in_da_unknown", 4, 0x0e | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "in_management", 4, 0x0f | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_queue_0", 4, 0x10 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_queue_1", 4, 0x11 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_queue_2", 4, 0x12 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_queue_3", 4, 0x13 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_queue_4", 4, 0x14 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_queue_5", 4, 0x15 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_queue_6", 4, 0x16 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_queue_7", 4, 0x17 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_cut_through", 4, 0x18 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_octets_a", 4, 0x1a | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_octets_b", 4, 0x1b | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, },
};
-static bool have_sw_in_discards(struct dsa_switch *ds)
+static bool mv88e6xxx_has_stat(struct dsa_switch *ds,
+ struct mv88e6xxx_hw_stat *stat)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- switch (ps->id) {
- case PORT_SWITCH_ID_6095: case PORT_SWITCH_ID_6161:
- case PORT_SWITCH_ID_6165: case PORT_SWITCH_ID_6171:
- case PORT_SWITCH_ID_6172: case PORT_SWITCH_ID_6176:
- case PORT_SWITCH_ID_6182: case PORT_SWITCH_ID_6185:
- case PORT_SWITCH_ID_6352:
+ switch (stat->type) {
+ case BANK0:
return true;
- default:
- return false;
- }
-}
-
-static void _mv88e6xxx_get_strings(struct dsa_switch *ds,
- int nr_stats,
- struct mv88e6xxx_hw_stat *stats,
- int port, uint8_t *data)
-{
- int i;
-
- for (i = 0; i < nr_stats; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- stats[i].string, ETH_GSTRING_LEN);
+ case BANK1:
+ return mv88e6xxx_6320_family(ds);
+ case PORT:
+ return mv88e6xxx_6095_family(ds) ||
+ mv88e6xxx_6185_family(ds) ||
+ mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6165_family(ds) ||
+ mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6352_family(ds);
}
+ return false;
}
static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
- int stat,
- struct mv88e6xxx_hw_stat *stats,
+ struct mv88e6xxx_hw_stat *s,
int port)
{
- struct mv88e6xxx_hw_stat *s = stats + stat;
u32 low;
u32 high = 0;
int ret;
u64 value;
- if (s->reg >= 0x100) {
- ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
- s->reg - 0x100);
+ switch (s->type) {
+ case PORT:
+ ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), s->reg);
if (ret < 0)
return UINT64_MAX;
low = ret;
if (s->sizeof_stat == 4) {
ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
- s->reg - 0x100 + 1);
+ s->reg + 1);
if (ret < 0)
return UINT64_MAX;
high = ret;
}
- } else {
+ break;
+ case BANK0:
+ case BANK1:
_mv88e6xxx_stats_read(ds, s->reg, &low);
if (s->sizeof_stat == 8)
_mv88e6xxx_stats_read(ds, s->reg + 1, &high);
@@ -717,61 +731,59 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
return value;
}
-static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
- int nr_stats,
- struct mv88e6xxx_hw_stat *stats,
- int port, uint64_t *data)
+void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
- int i;
-
- mutex_lock(&ps->smi_mutex);
+ struct mv88e6xxx_hw_stat *stat;
+ int i, j;
- ret = _mv88e6xxx_stats_snapshot(ds, port);
- if (ret < 0) {
- mutex_unlock(&ps->smi_mutex);
- return;
+ for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
+ stat = &mv88e6xxx_hw_stats[i];
+ if (mv88e6xxx_has_stat(ds, stat)) {
+ memcpy(data + j * ETH_GSTRING_LEN, stat->string,
+ ETH_GSTRING_LEN);
+ j++;
+ }
}
-
- /* Read each of the counters. */
- for (i = 0; i < nr_stats; i++)
- data[i] = _mv88e6xxx_get_ethtool_stat(ds, i, stats, port);
-
- mutex_unlock(&ps->smi_mutex);
-}
-
-/* All the statistics in the table */
-void
-mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
-{
- if (have_sw_in_discards(ds))
- _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
- mv88e6xxx_hw_stats, port, data);
- else
- _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
- mv88e6xxx_hw_stats, port, data);
}
int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
{
- if (have_sw_in_discards(ds))
- return ARRAY_SIZE(mv88e6xxx_hw_stats);
- return ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
+ struct mv88e6xxx_hw_stat *stat;
+ int i, j;
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
+ stat = &mv88e6xxx_hw_stats[i];
+ if (mv88e6xxx_has_stat(ds, stat))
+ j++;
+ }
+ return j;
}
void
mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
int port, uint64_t *data)
{
- if (have_sw_in_discards(ds))
- _mv88e6xxx_get_ethtool_stats(
- ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
- mv88e6xxx_hw_stats, port, data);
- else
- _mv88e6xxx_get_ethtool_stats(
- ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
- mv88e6xxx_hw_stats, port, data);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_hw_stat *stat;
+ int ret;
+ int i, j;
+
+ mutex_lock(&ps->smi_mutex);
+
+ ret = _mv88e6xxx_stats_snapshot(ds, port);
+ if (ret < 0) {
+ mutex_unlock(&ps->smi_mutex);
+ return;
+ }
+ for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
+ stat = &mv88e6xxx_hw_stats[i];
+ if (mv88e6xxx_has_stat(ds, stat)) {
+ data[j] = _mv88e6xxx_get_ethtool_stat(ds, stat, port);
+ j++;
+ }
+ }
+
+ mutex_unlock(&ps->smi_mutex);
}
int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
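
Note: the rework above replaces three near-duplicate helpers with one table walk. Every entry in mv88e6xxx_hw_stats now carries a stat type, mv88e6xxx_has_stat() decides per chip family whether an entry applies, and get_strings(), get_sset_count() and get_ethtool_stats() all derive their view from the same filtered iteration, so the three can no longer disagree about which stats exist. A self-contained sketch of that pattern, with hypothetical demo_* names:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum demo_type { BANK0, BANK1, PORT };

struct demo_stat { const char *name; enum demo_type type; };

static const struct demo_stat stats[] = {
        { "in_good_octets", BANK0 },
        { "in_discards",    BANK1 },
        { "sw_in_filtered", PORT  },
};

/* Per-chip capability check, standing in for mv88e6xxx_has_stat(). */
static bool demo_has_stat(bool has_bank1, const struct demo_stat *s)
{
        return s->type != BANK1 || has_bank1;
}

int main(void)
{
        bool has_bank1 = false; /* pretend this chip lacks bank 1 */
        size_t i, count = 0;

        /* Strings, counts and values all come from this one loop shape. */
        for (i = 0; i < ARRAY_SIZE(stats); i++)
                if (demo_has_stat(has_bank1, &stats[i]))
                        printf("stat %zu: %s\n", count++, stats[i].name);

        printf("sset_count = %zu\n", count);
        return 0;
}
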
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index 21c8daa..ca08f91 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -288,6 +288,7 @@
#define GLOBAL_STATS_OP_HIST_RX ((1 << 10) | GLOBAL_STATS_OP_BUSY)
#define GLOBAL_STATS_OP_HIST_TX ((2 << 10) | GLOBAL_STATS_OP_BUSY)
#define GLOBAL_STATS_OP_HIST_RX_TX ((3 << 10) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_BANK_1 BIT(9)
#define GLOBAL_STATS_COUNTER_32 0x1e
#define GLOBAL_STATS_COUNTER_01 0x1f
@@ -420,10 +421,17 @@ struct mv88e6xxx_priv_state {
struct work_struct bridge_work;
};
+enum stat_type {
+ BANK0,
+ BANK1,
+ PORT,
+};
+
struct mv88e6xxx_hw_stat {
char string[ETH_GSTRING_LEN];
int sizeof_stat;
int reg;
+ enum stat_type type;
};
int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active);
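
Note: GLOBAL_STATS_OP_BANK_1 is BIT(9), and the bank-1 table entries above OR it directly into their register value, so the common stats-read path forwards the bank select to the hardware op without extra plumbing. A small sketch of the encoding, assuming a 5-bit counter index as used by the table above:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)                  (1U << (n))
#define GLOBAL_STATS_OP_BANK_1  BIT(9)

int main(void)
{
        /* "in_filtered" from the table: counter 0x01 in bank 1. */
        uint16_t reg = 0x01 | GLOBAL_STATS_OP_BANK_1;

        printf("counter index = 0x%02x, bank = %u\n",
               (unsigned)(reg & 0x1f),
               (unsigned)!!(reg & GLOBAL_STATS_OP_BANK_1));
        return 0;
}
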
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 443ef92..319653a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -72,8 +72,10 @@ MODULE_VERSION(DRV_MODULE_VERSION);
#define BNXT_TX_PUSH_THRESH 92
enum board_idx {
+ BCM57301,
BCM57302,
BCM57304,
+ BCM57402,
BCM57404,
BCM57406,
BCM57304_VF,
@@ -84,17 +86,21 @@ enum board_idx {
static const struct {
char *name;
} board_info[] = {
- { "Broadcom BCM57302 NetXtreme-C Single-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" },
+ { "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
{ "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
{ "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57406 NetXtreme-E Dual-port 10Gb Ethernet" },
+ { "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
{ "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
{ "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
+ { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
#ifdef CONFIG_BNXT_SRIOV
@@ -856,8 +862,13 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
struct tcphdr *th;
int payload_off, tcp_opt_len = 0;
int len, nw_off;
+ u16 segs;
- NAPI_GRO_CB(skb)->count = TPA_END_TPA_SEGS(tpa_end);
+ segs = TPA_END_TPA_SEGS(tpa_end);
+ if (segs == 1)
+ return skb;
+
+ NAPI_GRO_CB(skb)->count = segs;
skb_shinfo(skb)->gso_size =
le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
skb_shinfo(skb)->gso_type = tpa_info->gso_type;
@@ -1187,8 +1198,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
skb->csum_level = RX_CMP_ENCAP(rxcmp1);
}
} else {
- if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS)
- cpr->rx_l4_csum_errors++;
+ if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
+ if (dev->features & NETIF_F_RXCSUM)
+ cpr->rx_l4_csum_errors++;
+ }
}
skb_record_rx_queue(skb, bnapi->index);
@@ -1969,11 +1982,12 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
rxr->rx_prod = prod;
ring->fw_ring_id = INVALID_HW_RING_ID;
+ ring = &rxr->rx_agg_ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
return 0;
- ring = &rxr->rx_agg_ring_struct;
-
type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
@@ -1989,7 +2003,6 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
prod = NEXT_RX_AGG(prod);
}
rxr->rx_agg_prod = prod;
- ring->fw_ring_id = INVALID_HW_RING_ID;
if (bp->flags & BNXT_FLAG_TPA) {
if (rxr->rx_tpa) {
@@ -2710,6 +2723,14 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
+{
+ struct hwrm_func_drv_unrgtr_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
{
u32 rc = 0;
@@ -2772,7 +2793,7 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
- req.dflt_vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+ req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
@@ -2805,7 +2826,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
- CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID)
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr)
@@ -2824,7 +2845,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req.ethertype = htons(ETH_P_IP);
memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
- req.ipaddr_type = 4;
+ req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
req.ip_protocol = keys->basic.ip_proto;
req.src_ipaddr[0] = keys->addrs.v4addrs.src;
@@ -2837,7 +2858,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req.dst_port = keys->ports.dst;
req.dst_port_mask = cpu_to_be16(0xffff);
- req.dst_vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc)
@@ -2857,10 +2878,10 @@ static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
- req.dst_vnic_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
+ req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
req.enables =
cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
- CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
memcpy(req.l2_addr, mac_addr, ETH_ALEN);
req.l2_addr_mask[0] = 0xff;
@@ -2930,7 +2951,8 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
req.enables =
cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
- VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS);
+ VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
+ VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
/* Number of segs are log2 units, and first packet is not
* included as part of this units.
@@ -2948,6 +2970,8 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
segs = ilog2(nsegs);
req.max_agg_segs = cpu_to_le16(segs);
req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
+
+ req.min_agg_len = cpu_to_le32(512);
}
req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -3293,54 +3317,45 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
int i, rc = 0;
- if (bp->cp_nr_rings) {
- for (i = 0; i < bp->cp_nr_rings; i++) {
- struct bnxt_napi *bnapi = bp->bnapi[i];
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
- rc = hwrm_ring_alloc_send_msg(bp, ring,
- HWRM_RING_ALLOC_CMPL, i,
- INVALID_STATS_CTX_ID);
- if (rc)
- goto err_out;
- cpr->cp_doorbell = bp->bar1 + i * 0x80;
- BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
- bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
- }
+ rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
+ INVALID_STATS_CTX_ID);
+ if (rc)
+ goto err_out;
+ cpr->cp_doorbell = bp->bar1 + i * 0x80;
+ BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
}
- if (bp->tx_nr_rings) {
- for (i = 0; i < bp->tx_nr_rings; i++) {
- struct bnxt_napi *bnapi = bp->bnapi[i];
- struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
- struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
- u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx;
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx;
- rc = hwrm_ring_alloc_send_msg(bp, ring,
- HWRM_RING_ALLOC_TX, i,
- fw_stats_ctx);
- if (rc)
- goto err_out;
- txr->tx_doorbell = bp->bar1 + i * 0x80;
- }
+ rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX, i,
+ fw_stats_ctx);
+ if (rc)
+ goto err_out;
+ txr->tx_doorbell = bp->bar1 + i * 0x80;
}
- if (bp->rx_nr_rings) {
- for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_napi *bnapi = bp->bnapi[i];
- struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
- struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
- rc = hwrm_ring_alloc_send_msg(bp, ring,
- HWRM_RING_ALLOC_RX, i,
- INVALID_STATS_CTX_ID);
- if (rc)
- goto err_out;
- rxr->rx_doorbell = bp->bar1 + i * 0x80;
- writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
- bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
- }
+ rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX, i,
+ INVALID_STATS_CTX_ID);
+ if (rc)
+ goto err_out;
+ rxr->rx_doorbell = bp->bar1 + i * 0x80;
+ writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
}
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
@@ -3408,91 +3423,73 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
return 0;
}
-static int bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
+static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
- int i, rc = 0;
+ int i;
if (!bp->bnapi)
- return 0;
+ return;
- if (bp->tx_nr_rings) {
- for (i = 0; i < bp->tx_nr_rings; i++) {
- struct bnxt_napi *bnapi = bp->bnapi[i];
- struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
- struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
- u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- hwrm_ring_free_send_msg(
- bp, ring,
- RING_FREE_REQ_RING_TYPE_TX,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- }
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(bp, ring,
+ RING_FREE_REQ_RING_TYPE_TX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
}
}
- if (bp->rx_nr_rings) {
- for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_napi *bnapi = bp->bnapi[i];
- struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
- struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
- u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- hwrm_ring_free_send_msg(
- bp, ring,
- RING_FREE_REQ_RING_TYPE_RX,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[i].rx_fw_ring_id =
- INVALID_HW_RING_ID;
- }
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+ u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(bp, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
}
}
- if (bp->rx_agg_nr_pages) {
- for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_napi *bnapi = bp->bnapi[i];
- struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
- struct bnxt_ring_struct *ring =
- &rxr->rx_agg_ring_struct;
- u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- hwrm_ring_free_send_msg(
- bp, ring,
- RING_FREE_REQ_RING_TYPE_RX,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[i].agg_fw_ring_id =
- INVALID_HW_RING_ID;
- }
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(bp, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
}
}
- if (bp->cp_nr_rings) {
- for (i = 0; i < bp->cp_nr_rings; i++) {
- struct bnxt_napi *bnapi = bp->bnapi[i];
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- hwrm_ring_free_send_msg(
- bp, ring,
- RING_FREE_REQ_RING_TYPE_CMPL,
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[i].cp_fw_ring_id =
- INVALID_HW_RING_ID;
- }
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(bp, ring,
+ RING_FREE_REQ_RING_TYPE_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
}
}
-
- return rc;
}
int bnxt_hwrm_set_coal(struct bnxt *bp)
@@ -3604,7 +3601,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
return 0;
}
-static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
struct hwrm_func_qcaps_input req = {0};
@@ -3628,9 +3625,10 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
- pf->max_pf_tx_rings = pf->max_tx_rings;
pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
- pf->max_pf_rx_rings = pf->max_rx_rings;
+ pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
+ if (!pf->max_hw_ring_grps)
+ pf->max_hw_ring_grps = pf->max_tx_rings;
pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
pf->max_vnics = le16_to_cpu(resp->max_vnics);
pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
@@ -3658,6 +3656,9 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
+ if (!vf->max_hw_ring_grps)
+ vf->max_hw_ring_grps = vf->max_tx_rings;
vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
vf->max_vnics = le16_to_cpu(resp->max_vnics);
vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
@@ -3734,14 +3735,11 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
- if (req.hwrm_intf_maj != resp->hwrm_intf_maj ||
- req.hwrm_intf_min != resp->hwrm_intf_min ||
- req.hwrm_intf_upd != resp->hwrm_intf_upd) {
- netdev_warn(bp->dev, "HWRM interface %d.%d.%d does not match driver interface %d.%d.%d.\n",
+ if (resp->hwrm_intf_maj < 1) {
+ netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
resp->hwrm_intf_maj, resp->hwrm_intf_min,
- resp->hwrm_intf_upd, req.hwrm_intf_maj,
- req.hwrm_intf_min, req.hwrm_intf_upd);
- netdev_warn(bp->dev, "Please update driver or firmware with matching interface versions.\n");
+ resp->hwrm_intf_upd);
+ netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
}
snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d",
resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
@@ -3944,8 +3942,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
}
bp->vnic_info[0].uc_filter_count = 1;
- bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST |
- CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+ bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
bp->vnic_info[0].rx_mask |=
@@ -4026,10 +4023,8 @@ static int bnxt_set_real_num_queues(struct bnxt *bp)
return rc;
#ifdef CONFIG_RFS_ACCEL
- if (bp->rx_nr_rings)
+ if (bp->flags & BNXT_FLAG_RFS)
dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
- if (!dev->rx_cpu_rmap)
- rc = -ENOMEM;
#endif
return rc;
@@ -4353,7 +4348,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->auto_mode = resp->auto_mode;
link_info->auto_pause_setting = resp->auto_pause;
link_info->force_pause_setting = resp->force_pause;
- link_info->duplex_setting = resp->duplex_setting;
+ link_info->duplex_setting = resp->duplex;
if (link_info->phy_link_status == BNXT_LINK_LINK)
link_info->link_speed = le16_to_cpu(resp->link_speed);
else
@@ -4930,9 +4925,32 @@ skip_uc:
return rc;
}
+static bool bnxt_rfs_capable(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct bnxt_pf_info *pf = &bp->pf;
+ int vnics;
+
+ if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
+ return false;
+
+ vnics = 1 + bp->rx_nr_rings;
+ if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics)
+ return false;
+
+ return true;
+#else
+ return false;
+#endif
+}
+
static netdev_features_t bnxt_fix_features(struct net_device *dev,
netdev_features_t features)
{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (!bnxt_rfs_capable(bp))
+ features &= ~NETIF_F_NTUPLE;
return features;
}
@@ -4973,7 +4991,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
bp->flags = flags;
- if (!netif_running(dev)) {
+ if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
if (update_tpa)
bnxt_set_ring_params(bp);
return rc;
@@ -5549,6 +5567,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
cancel_work_sync(&bp->sp_task);
bp->sp_event = 0;
+ bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
pci_iounmap(pdev, bp->bar2);
pci_iounmap(pdev, bp->bar1);
@@ -5610,25 +5629,28 @@ static int bnxt_get_max_irq(struct pci_dev *pdev)
void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx)
{
- int max_rings = 0;
+ int max_rings = 0, max_ring_grps = 0;
if (BNXT_PF(bp)) {
- *max_tx = bp->pf.max_pf_tx_rings;
- *max_rx = bp->pf.max_pf_rx_rings;
+ *max_tx = bp->pf.max_tx_rings;
+ *max_rx = bp->pf.max_rx_rings;
max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs);
+ max_ring_grps = bp->pf.max_hw_ring_grps;
} else {
#ifdef CONFIG_BNXT_SRIOV
*max_tx = bp->vf.max_tx_rings;
*max_rx = bp->vf.max_rx_rings;
max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs);
+ max_ring_grps = bp->vf.max_hw_ring_grps;
#endif
}
if (bp->flags & BNXT_FLAG_AGG_RINGS)
*max_rx >>= 1;
*max_rx = min_t(int, *max_rx, max_rings);
+ *max_rx = min_t(int, *max_rx, max_ring_grps);
*max_tx = min_t(int, *max_tx, max_rings);
}
@@ -5652,11 +5674,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (bnxt_vf_pciid(ent->driver_data))
bp->flags |= BNXT_FLAG_VF;
- if (pdev->msix_cap) {
+ if (pdev->msix_cap)
bp->flags |= BNXT_FLAG_MSIX_CAP;
- if (BNXT_PF(bp))
- bp->flags |= BNXT_FLAG_RFS;
- }
rc = bnxt_init_board(pdev, dev);
if (rc < 0)
@@ -5675,9 +5694,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_RXHASH |
NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
- if (bp->flags & BNXT_FLAG_RFS)
- dev->hw_features |= NETIF_F_NTUPLE;
-
dev->hw_enc_features =
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
@@ -5736,6 +5752,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
bp->num_stat_ctxs = bp->cp_nr_rings;
+ if (BNXT_PF(bp)) {
+ dev->hw_features |= NETIF_F_NTUPLE;
+ if (bnxt_rfs_capable(bp)) {
+ bp->flags |= BNXT_FLAG_RFS;
+ dev->features |= NETIF_F_NTUPLE;
+ }
+ }
+
if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
bp->flags |= BNXT_FLAG_STRIP_VLAN;
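
Note: on the feature-flag side, the bnxt changes above move NTUPLE handling to the standard ndo_fix_features pattern. The PF always advertises NETIF_F_NTUPLE in hw_features, and bnxt_fix_features() strips it from the effective set whenever bnxt_rfs_capable() finds that the required VNICs (one per RX ring plus the default) exceed max_vnics or max_rsscos_ctxs. A standalone sketch of that pattern, with invented resource numbers and demo_* names:

#include <stdbool.h>
#include <stdio.h>

#define NETIF_F_NTUPLE  (1ULL << 0)     /* stand-in for the kernel flag */

struct demo_bp { int rx_nr_rings, max_vnics, max_rsscos_ctxs; };

/* Mirrors the shape of bnxt_rfs_capable(): one vnic per rx ring + default. */
static bool demo_rfs_capable(const struct demo_bp *bp)
{
        int vnics = 1 + bp->rx_nr_rings;

        return vnics <= bp->max_vnics && vnics <= bp->max_rsscos_ctxs;
}

/* Shape of an ndo_fix_features hook: veto bits the config cannot honor. */
static unsigned long long demo_fix_features(const struct demo_bp *bp,
                                            unsigned long long features)
{
        if (!demo_rfs_capable(bp))
                features &= ~NETIF_F_NTUPLE;
        return features;
}

int main(void)
{
        struct demo_bp bp = { .rx_nr_rings = 8, .max_vnics = 4,
                              .max_rsscos_ctxs = 4 };

        printf("ntuple on: %d\n",
               !!(demo_fix_features(&bp, NETIF_F_NTUPLE) & NETIF_F_NTUPLE));
        return 0;
}

The bnxt_set_channels() hunk further down calls netdev_update_features(), which re-runs the fixup, so the NTUPLE decision tracks changes to the ring count.
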
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index f199f4c..78d639d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -11,11 +11,11 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "0.1.24"
+#define DRV_MODULE_VERSION "1.0.0"
-#define DRV_VER_MAJ 0
-#define DRV_VER_MIN 1
-#define DRV_VER_UPD 24
+#define DRV_VER_MAJ 1
+#define DRV_VER_MIN 0
+#define DRV_VER_UPD 0
struct tx_bd {
__le32 tx_bd_len_flags_type;
@@ -695,6 +695,7 @@ struct bnxt_vf_info {
u16 max_cp_rings;
u16 max_tx_rings;
u16 max_rx_rings;
+ u16 max_hw_ring_grps;
u16 max_l2_ctxs;
u16 max_irqs;
u16 max_vnics;
@@ -722,9 +723,8 @@ struct bnxt_pf_info {
u16 max_rsscos_ctxs;
u16 max_cp_rings;
u16 max_tx_rings; /* HW assigned max tx rings for this PF */
- u16 max_pf_tx_rings; /* runtime max tx rings owned by PF */
u16 max_rx_rings; /* HW assigned max rx rings for this PF */
- u16 max_pf_rx_rings; /* runtime max rx rings owned by PF */
+ u16 max_hw_ring_grps;
u16 max_irqs;
u16 max_l2_ctxs;
u16 max_vnics;
@@ -1084,6 +1084,7 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
int _hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message(struct bnxt *, void *, u32, int);
int bnxt_hwrm_set_coal(struct bnxt *);
+int bnxt_hwrm_func_qcaps(struct bnxt *);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool);
int bnxt_open_nic(struct bnxt *, bool, bool);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 45bd628..a39511f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -266,6 +266,8 @@ static int bnxt_set_channels(struct net_device *dev,
bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
bp->num_stat_ctxs = bp->cp_nr_rings;
+ /* After changing number of rx channels, update NTUPLE feature. */
+ netdev_update_features(dev);
if (netif_running(dev)) {
rc = bnxt_open_nic(bp, true, false);
if ((!rc) && BNXT_PF(bp)) {
@@ -818,6 +820,9 @@ static int bnxt_flash_firmware(struct net_device *dev,
case BNX_DIR_TYPE_BOOTCODE_2:
code_type = CODE_BOOT;
break;
+ case BNX_DIR_TYPE_APE_FW:
+ code_type = CODE_MCTP_PASSTHRU;
+ break;
default:
netdev_err(dev, "Unsupported directory entry type: %u\n",
dir_type);
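
Note: in the bnxt_hsi.h diff below, the recurring change from u8 unused_1[3] to u8 timestamp_lo plus __le16 timestamp_hi reuses the same three bytes, so every async-event completion record stays 16 bytes. A quick layout check under that assumption, with stand-in typedefs for the kernel's little-endian types:

#include <assert.h>
#include <stdint.h>

typedef uint16_t __le16;
typedef uint32_t __le32;
typedef uint8_t u8;

/* Same field order as struct hwrm_async_event_cmpl after this diff. */
struct demo_async_event_cmpl {
        __le16 type;            /* offset 0  */
        __le16 event_id;        /* offset 2  */
        __le32 event_data2;     /* offset 4  */
        u8     opaque_v;        /* offset 8  */
        u8     timestamp_lo;    /* offset 9  */
        __le16 timestamp_hi;    /* offset 10 */
        __le32 event_data1;     /* offset 12 */
};

static_assert(sizeof(struct demo_async_event_cmpl) == 16,
              "async event completion record must stay 16 bytes");

int main(void) { return 0; }
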
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 70fc825..4badbed 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -103,19 +103,22 @@ struct hwrm_async_event_cmpl {
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD (0x20UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD (0x21UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0)
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_V 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
#define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1
- u8 unused_1[3];
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
__le32 event_data1;
};
@@ -132,9 +135,16 @@ struct hwrm_async_event_cmpl_link_status_change {
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
- u8 unused_1[3];
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_UP 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
};
/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */
@@ -150,7 +160,8 @@ struct hwrm_async_event_cmpl_link_mtu_change {
#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1
- u8 unused_1[3];
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
__le32 event_data1;
#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
@@ -169,7 +180,8 @@ struct hwrm_async_event_cmpl_link_speed_change {
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
- u8 unused_1[3];
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
__le32 event_data1;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
@@ -200,7 +212,8 @@ struct hwrm_async_event_cmpl_dcb_config_change {
#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL
#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
- u8 unused_1[3];
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
__le32 event_data1;
#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
@@ -219,7 +232,8 @@ struct hwrm_async_event_cmpl_port_conn_not_allowed {
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
- u8 unused_1[3];
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
__le32 event_data1;
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
@@ -238,7 +252,8 @@ struct hwrm_async_event_cmpl_func_drvr_unload {
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1
- u8 unused_1[3];
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
__le32 event_data1;
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
@@ -257,7 +272,8 @@ struct hwrm_async_event_cmpl_func_drvr_load {
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1
- u8 unused_1[3];
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
__le32 event_data1;
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
@@ -276,10 +292,13 @@ struct hwrm_async_event_cmpl_pf_drvr_unload {
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
- u8 unused_1[3];
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
__le32 event_data1;
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
};
/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */
@@ -289,16 +308,19 @@ struct hwrm_async_event_cmpl_pf_drvr_load {
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x20UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x21UL << 0)
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1
- u8 unused_1[3];
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
__le32 event_data1;
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16
};
/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */
@@ -314,7 +336,8 @@ struct hwrm_async_event_cmpl_vf_flr {
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1
- u8 unused_1[3];
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
__le32 event_data1;
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
@@ -333,7 +356,8 @@ struct hwrm_async_event_cmpl_vf_mac_addr_change {
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
- u8 unused_1[3];
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
__le32 event_data1;
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
@@ -357,18 +381,20 @@ struct hwrm_async_event_cmpl_hwrm_error {
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
- u8 unused_1[3];
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
__le32 event_data1;
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
};
-/* HW Resource Manager Specification 0.7.8 */
-#define HWRM_VERSION_MAJOR 0
-#define HWRM_VERSION_MINOR 7
-#define HWRM_VERSION_UPDATE 8
+/* HW Resource Manager Specification 1.0.0 */
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 0
+#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_STR "0.7.8"
-/* Following is the signature for HWRM message field that indicates not
+#define HWRM_VERSION_STR "1.0.0"
+/*
+ * Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it the size of the field if needed.
*/
#define HWRM_NA_SIGNATURE ((__le32)(-1))
@@ -398,7 +424,9 @@ struct output {
struct cmd_nums {
__le16 req_type;
#define HWRM_VER_GET (0x0UL)
- #define HWRM_FUNC_DISABLE (0x10UL)
+ #define HWRM_FUNC_BUF_UNRGTR (0xeUL)
+ #define HWRM_FUNC_VF_CFG (0xfUL)
+ #define RESERVED1 (0x10UL)
#define HWRM_FUNC_RESET (0x11UL)
#define HWRM_FUNC_GETFID (0x12UL)
#define HWRM_FUNC_VF_ALLOC (0x13UL)
@@ -414,10 +442,9 @@ struct cmd_nums {
#define HWRM_FUNC_DRV_RGTR (0x1dUL)
#define HWRM_FUNC_DRV_QVER (0x1eUL)
#define HWRM_FUNC_BUF_RGTR (0x1fUL)
- #define HWRM_FUNC_VF_CFG (0x20UL)
#define HWRM_PORT_PHY_CFG (0x20UL)
#define HWRM_PORT_MAC_CFG (0x21UL)
- #define HWRM_PORT_ENABLE (0x22UL)
+ #define RESERVED2 (0x22UL)
#define HWRM_PORT_QSTATS (0x23UL)
#define HWRM_PORT_LPBK_QSTATS (0x24UL)
#define HWRM_PORT_CLR_STATS (0x25UL)
@@ -455,13 +482,11 @@ struct cmd_nums {
#define HWRM_RING_GRP_FREE (0x61UL)
#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL)
#define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL)
- #define HWRM_ARB_GRP_ALLOC (0x80UL)
- #define HWRM_ARB_GRP_CFG (0x81UL)
#define HWRM_CFA_L2_FILTER_ALLOC (0x90UL)
#define HWRM_CFA_L2_FILTER_FREE (0x91UL)
#define HWRM_CFA_L2_FILTER_CFG (0x92UL)
#define HWRM_CFA_L2_SET_RX_MASK (0x93UL)
- #define HWRM_CFA_L2_SET_BCASTMCAST_MIRRORING (0x94UL)
+ #define RESERVED3 (0x94UL)
#define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL)
#define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL)
#define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL)
@@ -469,6 +494,9 @@ struct cmd_nums {
#define HWRM_CFA_NTUPLE_FILTER_ALLOC (0x99UL)
#define HWRM_CFA_NTUPLE_FILTER_FREE (0x9aUL)
#define HWRM_CFA_NTUPLE_FILTER_CFG (0x9bUL)
+ #define HWRM_CFA_EM_FLOW_ALLOC (0x9cUL)
+ #define HWRM_CFA_EM_FLOW_FREE (0x9dUL)
+ #define HWRM_CFA_EM_FLOW_CFG (0x9eUL)
#define HWRM_TUNNEL_DST_PORT_QUERY (0xa0UL)
#define HWRM_TUNNEL_DST_PORT_ALLOC (0xa1UL)
#define HWRM_TUNNEL_DST_PORT_FREE (0xa2UL)
@@ -483,8 +511,6 @@ struct cmd_nums {
#define HWRM_FWD_RESP (0xd2UL)
#define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL)
#define HWRM_TEMP_MONITOR_QUERY (0xe0UL)
- #define HWRM_MGMT_L2_FILTER_ALLOC (0x100UL)
- #define HWRM_MGMT_L2_FILTER_FREE (0x101UL)
#define HWRM_DBG_READ_DIRECT (0xff10UL)
#define HWRM_DBG_READ_INDIRECT (0xff11UL)
#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
@@ -505,7 +531,6 @@ struct cmd_nums {
__le16 unused_0[3];
};
-/* Return Codes (8 bytes) */
struct ret_codes {
__le16 error_code;
#define HWRM_ERR_CODE_SUCCESS (0x0UL)
@@ -529,7 +554,7 @@ struct hwrm_err_output {
__le16 resp_len;
__le32 opaque_0;
__le16 opaque_1;
- u8 opaque_2;
+ u8 cmd_err;
u8 valid;
};
@@ -686,65 +711,38 @@ struct hwrm_ver_get_output {
u8 hwrm_fw_min;
u8 hwrm_fw_bld;
u8 hwrm_fw_rsvd;
- u8 ape_fw_maj;
- u8 ape_fw_min;
- u8 ape_fw_bld;
- u8 ape_fw_rsvd;
- u8 kong_fw_maj;
- u8 kong_fw_min;
- u8 kong_fw_bld;
- u8 kong_fw_rsvd;
- u8 tang_fw_maj;
- u8 tang_fw_min;
- u8 tang_fw_bld;
- u8 tang_fw_rsvd;
- u8 bono_fw_maj;
- u8 bono_fw_min;
- u8 bono_fw_bld;
- u8 bono_fw_rsvd;
+ u8 mgmt_fw_maj;
+ u8 mgmt_fw_min;
+ u8 mgmt_fw_bld;
+ u8 mgmt_fw_rsvd;
+ u8 netctrl_fw_maj;
+ u8 netctrl_fw_min;
+ u8 netctrl_fw_bld;
+ u8 netctrl_fw_rsvd;
+ __le32 reserved1;
+ u8 roce_fw_maj;
+ u8 roce_fw_min;
+ u8 roce_fw_bld;
+ u8 roce_fw_rsvd;
char hwrm_fw_name[16];
- char ape_fw_name[16];
- char kong_fw_name[16];
- char tang_fw_name[16];
- char bono_fw_name[16];
+ char mgmt_fw_name[16];
+ char netctrl_fw_name[16];
+ __le32 reserved2[4];
+ char roce_fw_name[16];
__le16 chip_num;
u8 chip_rev;
u8 chip_metal;
u8 chip_bond_id;
- u8 unused_0;
+ u8 chip_platform_type;
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC (0x0UL << 0)
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA (0x1UL << 0)
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM (0x2UL << 0)
__le16 max_req_win_len;
__le16 max_resp_len;
__le16 def_req_timeout;
+ u8 unused_0;
u8 unused_1;
u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_disable */
-/* Input (24 bytes) */
-struct hwrm_func_disable_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_DISABLE_REQ_ENABLES_VF_ID_VALID 0x1UL
- __le16 vf_id;
- __le16 unused_0;
-};
-
-/* Output (16 bytes) */
-struct hwrm_func_disable_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
u8 valid;
};
@@ -759,7 +757,12 @@ struct hwrm_func_reset_input {
__le32 enables;
#define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
__le16 vf_id;
- __le16 unused_0;
+ u8 func_reset_level;
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL (0x0UL << 0)
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME (0x1UL << 0)
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN (0x2UL << 0)
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF (0x3UL << 0)
+ u8 unused_0;
};
/* Output (16 bytes) */
@@ -861,7 +864,7 @@ struct hwrm_func_vf_free_output {
};
/* hwrm_func_vf_cfg */
-/* Input (24 bytes) */
+/* Input (32 bytes) */
struct hwrm_func_vf_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -871,8 +874,11 @@ struct hwrm_func_vf_cfg_input {
__le32 enables;
#define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
+ #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
__le16 mtu;
__le16 guest_vlan;
+ __le16 async_event_cr;
+ __le16 unused_0[3];
};
/* Output (16 bytes) */
@@ -944,7 +950,7 @@ struct hwrm_func_cfg_input {
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
- __le16 vf_id;
+ __le16 fid;
u8 unused_0;
u8 unused_1;
__le32 flags;
@@ -1000,10 +1006,6 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE (0x2UL << 0)
#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
u8 allowed_vlan_pris;
- #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_NOCHECK (0x0UL << 0)
- #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_VALIDATE_VLAN (0x1UL << 0)
- #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_IF_VLANDNE (0x2UL << 0)
- #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
u8 evb_mode;
#define FUNC_CFG_REQ_EVB_MODE_NO_EVB (0x0UL << 0)
#define FUNC_CFG_REQ_EVB_MODE_VEB (0x1UL << 0)
@@ -1166,6 +1168,15 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
#define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
__le16 os_type;
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN (0x0UL << 0)
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER (0x1UL << 0)
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS (0xeUL << 0)
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS (0x1dUL << 0)
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX (0x24UL << 0)
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD (0x2aUL << 0)
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI (0x68UL << 0)
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 (0x73UL << 0)
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 (0x74UL << 0)
u8 ver_maj;
u8 ver_min;
u8 ver_upd;
@@ -1276,9 +1287,7 @@ struct hwrm_func_drv_qver_input {
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
- __le32 enables;
- #define FUNC_DRV_QVER_REQ_ENABLES_OS_TYPE_VALID 0x1UL
- #define FUNC_DRV_QVER_REQ_ENABLES_VER_VALID 0x2UL
+ __le32 reserved;
__le16 fid;
__le16 unused_0;
};
@@ -1290,6 +1299,15 @@ struct hwrm_func_drv_qver_output {
__le16 seq_id;
__le16 resp_len;
__le16 os_type;
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN (0x0UL << 0)
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER (0x1UL << 0)
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS (0xeUL << 0)
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS (0x1dUL << 0)
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX (0x24UL << 0)
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD (0x2aUL << 0)
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI (0x68UL << 0)
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 (0x73UL << 0)
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 (0x74UL << 0)
u8 ver_maj;
u8 ver_min;
u8 ver_upd;
@@ -1498,9 +1516,7 @@ struct hwrm_port_phy_qcfg_output {
u8 force_pause;
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
- u8 duplex_setting;
- #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_HALF (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_FULL (0x1UL << 0)
+ u8 reserved1;
__le32 preemphasis;
u8 phy_maj;
u8 phy_min;
@@ -1601,33 +1617,6 @@ struct hwrm_port_mac_cfg_output {
u8 valid;
};
-/* hwrm_port_enable */
-/* Input (24 bytes) */
-struct hwrm_port_enable_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_ENABLE_REQ_FLAGS_FORWARD_TRAFFIC 0x1UL
- __le16 port_id;
- __le16 unused_0;
-};
-
-/* Output (16 bytes) */
-struct hwrm_port_enable_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
/* hwrm_port_qstats */
/* Input (40 bytes) */
struct hwrm_port_qstats_input {
@@ -1651,10 +1640,11 @@ struct hwrm_port_qstats_output {
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
- __le32 unused_0;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ u8 unused_0;
u8 unused_1;
u8 unused_2;
- u8 unused_3;
u8 valid;
};
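
The reworked hwrm_port_qstats response reports how many bytes of Tx and Rx statistics the firmware actually DMAed, instead of a fixed implied layout. A hedged sketch of consuming those sizes; the response-buffer field and the omitted port/DMA-address setup are assumptions about the surrounding driver:

static int bnxt_port_qstats_sketch(struct bnxt *bp)
{
	struct hwrm_port_qstats_input req = {0};
	struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
	/* port_id and the Tx/Rx stat DMA addresses would be set here. */
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		netdev_dbg(bp->dev, "fw wrote %u Tx / %u Rx stat bytes\n",
			   le16_to_cpu(resp->tx_stat_size),
			   le16_to_cpu(resp->rx_stat_size));
	return rc;
}
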
@@ -1668,7 +1658,7 @@ struct hwrm_port_lpbk_qstats_input {
__le64 resp_addr;
};
-/* Output (64 bytes) */
+/* Output (96 bytes) */
struct hwrm_port_lpbk_qstats_output {
__le16 error_code;
__le16 req_type;
@@ -1680,6 +1670,10 @@ struct hwrm_port_lpbk_qstats_output {
__le64 lpbk_ucast_bytes;
__le64 lpbk_mcast_bytes;
__le64 lpbk_bcast_bytes;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_error;
__le32 unused_0;
u8 unused_1;
u8 unused_2;
@@ -1884,12 +1878,11 @@ struct hwrm_queue_buffers_cfg_input {
__le32 enables;
#define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL
#define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_GROUP 0x4UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF 0x8UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON 0x10UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL 0x20UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL 0x40UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX 0x80UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF 0x4UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON 0x8UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL 0x10UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL 0x20UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX 0x40UL
__le32 queue_id;
__le32 reserved;
__le32 shared;
@@ -1921,15 +1914,15 @@ struct hwrm_queue_pfcenable_cfg_input {
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
- __le32 enables;
- #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI7_PFC_ENABLED 0x80UL
+ __le32 flags;
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
__le16 port_id;
__le16 unused_0;
};
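
Renaming this word from enables to flags is semantic: in HWRM an enables word marks which optional fields are valid, whereas these per-priority bits are the configuration itself. Because the FLAGS_PRIn_PFC_ENABLED defines are simply 1 << n, a priority bitmap maps onto the word unchanged; a small sketch under that reading:

/* pri_mask: bit n set => enable PFC on priority n (0..7). */
static void queue_pfcenable_fill(struct hwrm_queue_pfcenable_cfg_input *req,
				 u8 pri_mask, u16 port_id)
{
	req->flags = cpu_to_le32(pri_mask);	/* bits line up 1:1 */
	req->port_id = cpu_to_le16(port_id);
}
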
@@ -1962,14 +1955,14 @@ struct hwrm_queue_pri2cos_cfg_input {
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x2UL
__le32 enables;
u8 port_id;
- u8 pri0_cos;
- u8 pri1_cos;
- u8 pri2_cos;
- u8 pri3_cos;
- u8 pri4_cos;
- u8 pri5_cos;
- u8 pri6_cos;
- u8 pri7_cos;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
u8 unused_0[7];
};
@@ -2164,6 +2157,7 @@ struct hwrm_vnic_cfg_input {
__le32 flags;
#define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
#define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
+ #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
__le32 enables;
#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
#define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
@@ -2380,18 +2374,16 @@ struct hwrm_ring_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le32 enables;
- #define RING_ALLOC_REQ_ENABLES_ARB_GRP_ID_VALID 0x1UL
- #define RING_ALLOC_REQ_ENABLES_INPUT_NUM_VALID 0x2UL
- #define RING_ALLOC_REQ_ENABLES_WEIGHT_VALID 0x4UL
+ #define RING_ALLOC_REQ_ENABLES_RESERVED1 0x1UL
+ #define RING_ALLOC_REQ_ENABLES_RESERVED2 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_RESERVED3 0x4UL
#define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
- #define RING_ALLOC_REQ_ENABLES_MIN_BW_VALID 0x10UL
+ #define RING_ALLOC_REQ_ENABLES_RESERVED4 0x10UL
#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
u8 ring_type;
#define RING_ALLOC_REQ_RING_TYPE_CMPL (0x0UL << 0)
#define RING_ALLOC_REQ_RING_TYPE_TX (0x1UL << 0)
#define RING_ALLOC_REQ_RING_TYPE_RX (0x2UL << 0)
- #define RING_ALLOC_REQ_RING_TYPE_STATUS (0x3UL << 0)
- #define RING_ALLOC_REQ_RING_TYPE_CMD (0x4UL << 0)
u8 unused_0;
__le16 unused_1;
__le64 page_tbl_addr;
@@ -2406,17 +2398,17 @@ struct hwrm_ring_alloc_input {
__le16 queue_id;
u8 unused_4;
u8 unused_5;
- __le32 arb_grp_id;
- __le16 input_number;
+ __le32 reserved1;
+ __le16 reserved2;
u8 unused_6;
u8 unused_7;
- __le32 weight;
+ __le32 reserved3;
__le32 stat_ctx_id;
- __le32 min_bw;
+ __le32 reserved4;
__le32 max_bw;
u8 int_mode;
#define RING_ALLOC_REQ_INT_MODE_LEGACY (0x0UL << 0)
- #define RING_ALLOC_REQ_INT_MODE_MSI (0x1UL << 0)
+ #define RING_ALLOC_REQ_INT_MODE_RSVD (0x1UL << 0)
#define RING_ALLOC_REQ_INT_MODE_MSIX (0x2UL << 0)
#define RING_ALLOC_REQ_INT_MODE_POLL (0x3UL << 0)
u8 unused_8[3];
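
With the arbitration fields reserved, a Tx ring allocation boils down to the ring type, the backing page table, the associated completion ring and an optional statistics context. A sketch of the request setup; cmpl_ring_id and other fields not visible in this hunk (e.g. the ring length) are assumed from the struct's full definition:

static void bnxt_fill_tx_ring_alloc(struct bnxt *bp,
				    struct hwrm_ring_alloc_input *req,
				    dma_addr_t page_tbl, u16 cmpl_ring_id,
				    u32 stat_ctx_id)
{
	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_RING_ALLOC, -1, -1);
	req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
	req->page_tbl_addr = cpu_to_le64(page_tbl);
	req->cmpl_ring_id = cpu_to_le16(cmpl_ring_id);
	/* stat_ctx_id is only honoured when flagged valid in enables. */
	req->enables = cpu_to_le32(RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID);
	req->stat_ctx_id = cpu_to_le32(stat_ctx_id);
	req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
}
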
@@ -2448,8 +2440,6 @@ struct hwrm_ring_free_input {
#define RING_FREE_REQ_RING_TYPE_CMPL (0x0UL << 0)
#define RING_FREE_REQ_RING_TYPE_TX (0x1UL << 0)
#define RING_FREE_REQ_RING_TYPE_RX (0x2UL << 0)
- #define RING_FREE_REQ_RING_TYPE_STATUS (0x3UL << 0)
- #define RING_FREE_REQ_RING_TYPE_CMD (0x4UL << 0)
u8 unused_0;
__le16 ring_id;
__le32 unused_1;
@@ -2550,8 +2540,6 @@ struct hwrm_ring_reset_input {
#define RING_RESET_REQ_RING_TYPE_CMPL (0x0UL << 0)
#define RING_RESET_REQ_RING_TYPE_TX (0x1UL << 0)
#define RING_RESET_REQ_RING_TYPE_RX (0x2UL << 0)
- #define RING_RESET_REQ_RING_TYPE_STATUS (0x3UL << 0)
- #define RING_RESET_REQ_RING_TYPE_CMD (0x4UL << 0)
u8 unused_0;
__le16 ring_id;
__le32 unused_1;
@@ -2622,61 +2610,6 @@ struct hwrm_ring_grp_free_output {
u8 valid;
};
-/* hwrm_arb_grp_alloc */
-/* Input (24 bytes) */
-struct hwrm_arb_grp_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 input_number;
- __le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
-struct hwrm_arb_grp_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 arb_grp_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_arb_grp_cfg */
-/* Input (32 bytes) */
-struct hwrm_arb_grp_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 arb_grp_id;
- __le16 input_number;
- __le16 tx_ring;
- __le32 weight;
- __le32 unused_0;
-};
-
-/* Output (16 bytes) */
-struct hwrm_arb_grp_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
/* hwrm_cfa_l2_filter_alloc */
/* Input (96 bytes) */
struct hwrm_cfa_l2_filter_alloc_input {
@@ -2708,7 +2641,7 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x8000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
u8 l2_addr[6];
u8 unused_0;
@@ -2751,7 +2684,7 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
u8 unused_7;
- __le16 dst_vnic_id;
+ __le16 dst_id;
__le16 mirror_vnic_id;
u8 pri_hint;
#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0)
@@ -2816,10 +2749,11 @@ struct hwrm_cfa_l2_filter_cfg_input {
#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
#define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
__le32 enables;
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_VNIC_ID_VALID 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
__le64 l2_filter_id;
- __le32 dst_vnic_id;
- __le32 unused_0;
+ __le32 dst_id;
+ __le32 new_mirror_vnic_id;
};
/* Output (16 bytes) */
@@ -2843,9 +2777,9 @@ struct hwrm_cfa_l2_set_rx_mask_input {
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
- __le32 dflt_vnic_id;
+ __le32 vnic_id;
__le32 mask;
- #define CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST 0x1UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_RESERVED 0x1UL
#define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
#define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
#define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
@@ -2869,46 +2803,6 @@ struct hwrm_cfa_l2_set_rx_mask_output {
u8 valid;
};
-/* hwrm_cfa_l2_set_bcastmcast_mirroring */
-/* Input (32 bytes) */
-struct hwrm_cfa_l2_set_bcastmcast_mirroring_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 dflt_vnic_id;
- __le32 mirroring_flags;
- #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_MIRRORING 0x1UL
- #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_MIRRORING 0x2UL
- #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_SRC_KNOCKOUT 0x4UL
- #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_SRC_KNOCKOUT 0x8UL
- #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_VLAN_ID_VALID 0x10UL
- __le16 vlan_id;
- u8 bcast_domain;
- #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_PFONLY (0x0UL << 0)
- #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFS (0x1UL << 0)
- #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFSVFS (0x2UL << 0)
- u8 mcast_domain;
- #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_PFONLY (0x0UL << 0)
- #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFS (0x1UL << 0)
- #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFSVFS (0x2UL << 0)
- __le32 unused_0;
-};
-
-/* Output (16 bytes) */
-struct hwrm_cfa_l2_set_bcastmcast_mirroring_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
/* hwrm_cfa_tunnel_filter_alloc */
/* Input (88 bytes) */
struct hwrm_cfa_tunnel_filter_alloc_input {
@@ -3017,17 +2911,16 @@ struct hwrm_cfa_encap_record_alloc_input {
__le32 encap_data[16];
};
-/* Output (24 bytes) */
+/* Output (16 bytes) */
struct hwrm_cfa_encap_record_alloc_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
- __le64 encap_record_id;
- __le32 unused_0;
+ __le32 encap_record_id;
+ u8 unused_0;
u8 unused_1;
u8 unused_2;
- u8 unused_3;
u8 valid;
};
@@ -3039,7 +2932,8 @@ struct hwrm_cfa_encap_record_free_input {
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
- __le64 encap_record_id;
+ __le32 encap_record_id;
+ __le32 unused_0;
};
/* Output (16 bytes) */
@@ -3083,14 +2977,21 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x10000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
__le64 l2_filter_id;
u8 src_macaddr[6];
__be16 ethertype;
- u8 ipaddr_type;
+ u8 ip_addr_type;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN (0x0UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 (0x4UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 (0x6UL << 0)
u8 ip_protocol;
- __le16 dst_vnic_id;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN (0x0UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP (0x6UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP (0x11UL << 0)
+ __le16 dst_id;
__le16 mirror_vnic_id;
u8 tunnel_type;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
@@ -3104,6 +3005,11 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
u8 pri_hint;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE (0x1UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW (0x2UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST (0x3UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST (0x4UL << 0)
__be32 src_ipaddr[4];
__be32 src_ipaddr_mask[4];
__be32 dst_ipaddr[4];
@@ -3162,11 +3068,11 @@ struct hwrm_cfa_ntuple_filter_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 enables;
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_VNIC_ID_VALID 0x1UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID_VALID 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
__le32 unused_0;
__le64 ntuple_filter_id;
- __le32 new_dst_vnic_id;
+ __le32 new_dst_id;
__le32 new_mirror_vnic_id;
};
@@ -3192,16 +3098,8 @@ struct hwrm_tunnel_dst_port_query_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
u8 unused_0[7];
};
@@ -3228,16 +3126,8 @@ struct hwrm_tunnel_dst_port_alloc_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
u8 unused_0;
__be16 tunnel_dst_port_val;
__le32 unused_1;
@@ -3267,16 +3157,8 @@ struct hwrm_tunnel_dst_port_free_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
u8 unused_0;
__le16 tunnel_dst_port_id;
__le32 unused_1;
@@ -3416,68 +3298,145 @@ struct hwrm_stat_ctx_clr_stats_output {
u8 valid;
};
-/* hwrm_mgmt_l2_filter_alloc */
-/* Input (56 bytes) */
-struct hwrm_mgmt_l2_filter_alloc_input {
+/* hwrm_fw_reset */
+/* Input (24 bytes) */
+struct hwrm_fw_reset_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
- __le32 flags;
- #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
- #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0)
- __le32 enables;
- #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDRESS 0x1UL
- #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_OVLAN 0x2UL
- #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_IVLAN 0x4UL
- #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_ACTION_ID 0x8UL
- u8 l2_address[6];
+ u8 embedded_proc_type;
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT (0x0UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT (0x1UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL (0x2UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE (0x3UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_RSVD (0x4UL << 0)
+ u8 selfrst_status;
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_exec_fwd_resp */
+/* Input (128 bytes) */
+struct hwrm_exec_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_exec_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
u8 unused_1;
- u8 l2_address_mask[6];
- __le16 ovlan;
- __le16 ovlan_mask;
- __le16 ivlan;
- __le16 ivlan_mask;
u8 unused_2;
u8 unused_3;
- __le32 action_id;
- u8 action_bypass;
- #define MGMT_L2_FILTER_ALLOC_REQ_ACTION_BYPASS 0x1UL
- u8 unused_5[3];
+ u8 valid;
+};
+
+/* hwrm_reject_fwd_resp */
+/* Input (128 bytes) */
+struct hwrm_reject_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ __le16 unused_0[3];
};
/* Output (16 bytes) */
-struct hwrm_mgmt_l2_filter_alloc_output {
+struct hwrm_reject_fwd_resp_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
- __le16 mgmt_l2_filter_id;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_fwd_resp */
+/* Input (40 bytes) */
+struct hwrm_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_resp_target_id;
+ __le16 encap_resp_cmpl_ring;
+ __le16 encap_resp_len;
u8 unused_0;
u8 unused_1;
+ __le64 encap_resp_addr;
+ __le32 encap_resp[24];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
u8 unused_2;
u8 unused_3;
- u8 unused_4;
u8 valid;
};
-/* hwrm_mgmt_l2_filter_free */
-/* Input (24 bytes) */
-struct hwrm_mgmt_l2_filter_free_input {
+/* hwrm_fwd_async_event_cmpl */
+/* Input (32 bytes) */
+struct hwrm_fwd_async_event_cmpl_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
- __le16 mgmt_l2_filter_id;
- __le16 unused_0[3];
+ __le16 encap_async_event_target_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2[3];
+ u8 unused_3;
+ __le32 encap_async_event_cmpl[4];
};
/* Output (16 bytes) */
-struct hwrm_mgmt_l2_filter_free_output {
+struct hwrm_fwd_async_event_cmpl_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
@@ -3489,6 +3448,31 @@ struct hwrm_mgmt_l2_filter_free_output {
u8 valid;
};
+/* hwrm_temp_monitor_query */
+/* Input (16 bytes) */
+struct hwrm_temp_monitor_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_temp_monitor_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 temp;
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
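
hwrm_temp_monitor_query carries no payload beyond the standard request header and returns a single temperature byte. A sketch of a sensor read using the locking and helpers shown elsewhere in this patch; the HWRM_TEMP_MONITOR_QUERY req_type constant is assumed from the naming pattern, and reading temp as degrees Celsius is an assumption based on the field name:

static int bnxt_read_temp_sketch(struct bnxt *bp, u8 *temp)
{
	struct hwrm_temp_monitor_query_input req = {0};
	struct hwrm_temp_monitor_query_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*temp = resp->temp;	/* one-byte sensor reading */
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
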
/* hwrm_nvm_raw_write_blk */
/* Input (32 bytes) */
struct hwrm_nvm_raw_write_blk_input {
@@ -3621,7 +3605,7 @@ struct hwrm_nvm_get_dir_info_output {
};
/* hwrm_nvm_write */
-/* Input (40 bytes) */
+/* Input (48 bytes) */
struct hwrm_nvm_write_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3637,6 +3621,8 @@ struct hwrm_nvm_write_input {
__le16 option;
__le16 flags;
#define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
+ __le32 dir_item_length;
+ __le32 unused_0;
};
/* Output (16 bytes) */
@@ -3645,10 +3631,9 @@ struct hwrm_nvm_write_output {
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
+ __le32 dir_item_length;
+ __le16 dir_idx;
+ u8 unused_0;
u8 valid;
};
@@ -3833,214 +3818,4 @@ struct hwrm_nvm_verify_update_output {
u8 valid;
};
-/* hwrm_exec_fwd_resp */
-/* Input (120 bytes) */
-struct hwrm_exec_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_request[24];
- __le16 encap_resp_target_id;
- __le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
-struct hwrm_exec_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_reject_fwd_resp */
-/* Input (120 bytes) */
-struct hwrm_reject_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_request[24];
- __le16 encap_resp_target_id;
- __le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
-struct hwrm_reject_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_fwd_resp */
-/* Input (40 bytes) */
-struct hwrm_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 encap_resp_target_id;
- __le16 encap_resp_cmpl_ring;
- __le16 encap_resp_len;
- u8 unused_0;
- u8 unused_1;
- __le64 encap_resp_addr;
- __le32 encap_resp[24];
-};
-
-/* Output (16 bytes) */
-struct hwrm_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_fwd_async_event_cmpl */
-/* Input (32 bytes) */
-struct hwrm_fwd_async_event_cmpl_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 encap_async_event_target_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2[3];
- u8 unused_3;
- __le32 encap_async_event_cmpl[4];
-};
-
-/* Output (16 bytes) */
-struct hwrm_fwd_async_event_cmpl_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_fw_reset */
-/* Input (24 bytes) */
-struct hwrm_fw_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 embedded_proc_type;
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIMP (0x0UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_APE (0x1UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_KONG (0x2UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BONO (0x3UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_TANG (0x4UL << 0)
- u8 selfrst_status;
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
- __le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
-struct hwrm_fw_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 selfrst_status;
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_fw_qstatus */
-/* Input (24 bytes) */
-struct hwrm_fw_qstatus_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 embedded_proc_type;
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIMP (0x0UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_APE (0x1UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_KONG (0x2UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BONO (0x3UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_TANG (0x4UL << 0)
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
-struct hwrm_fw_qstatus_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 selfrst_status;
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_temp_monitor_query */
-/* Input (16 bytes) */
-struct hwrm_temp_monitor_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* Output (16 bytes) */
-struct hwrm_temp_monitor_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 temp;
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index ea044bb..c1cc83d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -64,7 +64,7 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
* the spoof check should also include vlan anti-spoofing
*/
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.fid = cpu_to_le16(vf->fw_fid);
req.flags = cpu_to_le32(func_flags);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
@@ -128,7 +128,7 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
memcpy(vf->mac_addr, mac, ETH_ALEN);
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.fid = cpu_to_le16(vf->fw_fid);
req.flags = cpu_to_le32(vf->func_flags);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
@@ -159,7 +159,7 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.fid = cpu_to_le16(vf->fw_fid);
req.flags = cpu_to_le32(vf->func_flags);
req.dflt_vlan = cpu_to_le16(vlan_tag);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
@@ -198,7 +198,7 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.fid = cpu_to_le16(vf->fw_fid);
req.flags = cpu_to_le32(vf->func_flags);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
req.max_bw = cpu_to_le32(max_tx_rate);
@@ -363,10 +363,11 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
}
 /* Only called by the PF to reserve resources for VFs */
-static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
+static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
u32 rc = 0, mtu, i;
u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
+ u16 vf_ring_grps;
struct hwrm_func_cfg_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
@@ -378,18 +379,18 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
* be removed once new HWRM provides HW ring groups capability in
* hwrm_func_qcap.
*/
- vf_cp_rings = min_t(u16, bp->pf.max_cp_rings, bp->pf.max_stat_ctxs);
- vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / *num_vfs;
+ vf_cp_rings = min_t(u16, pf->max_cp_rings, pf->max_stat_ctxs);
+ vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / num_vfs;
/* TODO: restore this logic below once the WA above is removed */
- /* vf_cp_rings = (bp->pf.max_cp_rings - bp->cp_nr_rings) / *num_vfs; */
- vf_stat_ctx = (bp->pf.max_stat_ctxs - bp->num_stat_ctxs) / *num_vfs;
+ /* vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs; */
+ vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings * 2) /
- *num_vfs;
+ vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
+ num_vfs;
else
- vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings) /
- *num_vfs;
- vf_tx_rings = (bp->pf.max_tx_rings - bp->tx_nr_rings) / *num_vfs;
+ vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
+ vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
+ vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;
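
The block above divides whatever the PF is not using evenly across the requested VFs; integer division means any remainder simply stays with the PF. A worked illustration with hypothetical capacities:

	/* Hypothetical numbers: pf->max_tx_rings = 64, bp->tx_nr_rings = 8,
	 * num_vfs = 7:
	 *
	 *	vf_tx_rings = (64 - 8) / 7 = 8 Tx rings offered per VF
	 *
	 * With num_vfs = 6 the same formula gives 9 per VF and leaves two
	 * rings unassigned rather than oversubscribing the hardware.
	 */
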
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
FUNC_CFG_REQ_ENABLES_MRU |
@@ -399,7 +400,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
- FUNC_CFG_REQ_ENABLES_NUM_VNICS);
+ FUNC_CFG_REQ_ENABLES_NUM_VNICS |
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
req.mru = cpu_to_le16(mtu);
@@ -409,6 +411,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
req.num_tx_rings = cpu_to_le16(vf_tx_rings);
req.num_rx_rings = cpu_to_le16(vf_rx_rings);
+ req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
req.num_l2_ctxs = cpu_to_le16(4);
vf_vnics = 1;
@@ -417,22 +420,24 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
mutex_lock(&bp->hwrm_cmd_lock);
- for (i = 0; i < *num_vfs; i++) {
- req.vf_id = cpu_to_le16(pf->first_vf_id + i);
+ for (i = 0; i < num_vfs; i++) {
+ req.fid = cpu_to_le16(pf->first_vf_id + i);
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
if (rc)
break;
- bp->pf.active_vfs = i + 1;
- bp->pf.vf[i].fw_fid = le16_to_cpu(req.vf_id);
+ pf->active_vfs = i + 1;
+ pf->vf[i].fw_fid = le16_to_cpu(req.fid);
}
mutex_unlock(&bp->hwrm_cmd_lock);
if (!rc) {
- bp->pf.max_pf_tx_rings = bp->tx_nr_rings;
- if (bp->flags & BNXT_FLAG_AGG_RINGS)
- bp->pf.max_pf_rx_rings = bp->rx_nr_rings * 2;
- else
- bp->pf.max_pf_rx_rings = bp->rx_nr_rings;
+ pf->max_tx_rings -= vf_tx_rings * num_vfs;
+ pf->max_rx_rings -= vf_rx_rings * num_vfs;
+ pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
+ pf->max_cp_rings -= vf_cp_rings * num_vfs;
+ pf->max_rsscos_ctxs -= num_vfs;
+ pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
+ pf->max_vnics -= vf_vnics * num_vfs;
}
return rc;
}
@@ -492,7 +497,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
goto err_out1;
/* Reserve resources for VFs */
- rc = bnxt_hwrm_func_cfg(bp, num_vfs);
+ rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
if (rc)
goto err_out2;
@@ -536,8 +541,8 @@ void bnxt_sriov_disable(struct bnxt *bp)
bnxt_free_vf_resources(bp);
bp->pf.active_vfs = 0;
- bp->pf.max_pf_rx_rings = bp->pf.max_rx_rings;
- bp->pf.max_pf_tx_rings = bp->pf.max_tx_rings;
+ /* Reclaim all resources for the PF. */
+ bnxt_hwrm_func_qcaps(bp);
}
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
@@ -595,6 +600,7 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
/* Set the new target id */
req.target_id = cpu_to_le16(vf->fw_fid);
+ req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
req.encap_resp_len = cpu_to_le16(msg_size);
req.encap_resp_addr = encap_resp_addr;
req.encap_resp_cmpl_ring = encap_resp_cpr;
@@ -629,6 +635,7 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
/* Set the new target id */
req.target_id = cpu_to_le16(vf->fw_fid);
+ req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
mutex_lock(&bp->hwrm_cmd_lock);
@@ -660,6 +667,7 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
/* Set the new target id */
req.target_id = cpu_to_le16(vf->fw_fid);
+ req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
mutex_lock(&bp->hwrm_cmd_lock);
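
All three forwarding paths now set encap_resp_target_id in addition to target_id: the former tells the firmware which function the encapsulated response or request is destined for, while the latter routes the forwarding command itself. A sketch of the shared addressing step; it assumes the generic struct input header layout that bnxt_hwrm_cmd_hdr_init() operates on:

/* Illustrative only: the addressing common to HWRM_FWD_RESP,
 * HWRM_EXEC_FWD_RESP and HWRM_REJECT_FWD_RESP.
 */
static void bnxt_fwd_req_set_vf(struct input *req_hdr, __le16 *encap_target,
				struct bnxt_vf_info *vf)
{
	req_hdr->target_id = cpu_to_le16(vf->fw_fid);	/* command routing */
	*encap_target = cpu_to_le16(vf->fw_fid);	/* payload destination */
}
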
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index e01e722..ec6e849 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -301,6 +301,8 @@ struct devlog_params {
/* Stores chip specific parameters */
struct arch_specific_params {
u8 nchan;
+ u8 pm_stats_cnt;
+ u8 cng_ch_bits_log; /* congestion channel map bits width */
u16 mps_rplc_size;
u16 vfcount;
u32 sge_fl_db;
@@ -398,11 +400,10 @@ struct link_config {
enum {
MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */
- MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */
+	MAX_OFLD_QSETS = 16, /* # of offload Tx, iSCSI Rx queue sets */
MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
MAX_RDMA_CIQS = 32, /* # of RDMA concentrator IQs */
- MAX_ISCSI_QUEUES = NCHAN, /* # of streaming iSCSI Rx queues */
};
enum {
@@ -420,7 +421,7 @@ enum {
INGQ_EXTRAS = 2, /* firmware event queue and */
/* forwarded interrupts */
MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
- + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
+ + MAX_RDMA_CIQS + INGQ_EXTRAS,
};
struct adapter;
@@ -639,7 +640,7 @@ struct sge {
struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
- struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
+ struct sge_ofld_rxq iscsirxq[MAX_OFLD_QSETS];
struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
@@ -650,10 +651,10 @@ struct sge {
u16 max_ethqsets; /* # of available Ethernet queue sets */
u16 ethqsets; /* # of active Ethernet queue sets */
u16 ethtxq_rover; /* Tx queue to clean up next */
- u16 ofldqsets; /* # of active offload queue sets */
+ u16 iscsiqsets; /* # of active iSCSI queue sets */
u16 rdmaqs; /* # of available RDMA Rx queues */
u16 rdmaciqs; /* # of available RDMA concentrator IQs */
- u16 ofld_rxq[MAX_OFLD_QSETS];
+ u16 iscsi_rxq[MAX_OFLD_QSETS];
u16 rdma_rxq[MAX_RDMA_QUEUES];
u16 rdma_ciq[MAX_RDMA_CIQS];
u16 timer_val[SGE_NTIMERS];
@@ -679,7 +680,7 @@ struct sge {
};
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
-#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
+#define for_each_iscsirxq(sge, i) for (i = 0; i < (sge)->iscsiqsets; i++)
#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
@@ -1256,6 +1257,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver);
int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
const u8 *fw_data, unsigned int size, int force);
+int t4_fl_pkt_align(struct adapter *adap);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
int t4_check_fw_version(struct adapter *adap);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 0d579b1..1bab34f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -757,8 +757,8 @@ static int pm_stats_show(struct seq_file *seq, void *v)
};
int i;
- u32 tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
- u64 tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
+ u32 tx_cnt[T6_PM_NSTATS], rx_cnt[T6_PM_NSTATS];
+ u64 tx_cyc[T6_PM_NSTATS], rx_cyc[T6_PM_NSTATS];
struct adapter *adap = seq->private;
t4_pmtx_get_stats(adap, tx_cnt, tx_cyc);
@@ -773,6 +773,32 @@ static int pm_stats_show(struct seq_file *seq, void *v)
for (i = 0; i < PM_NSTATS - 1; i++)
seq_printf(seq, "%-13s %10u %20llu\n",
rx_pm_stats[i], rx_cnt[i], rx_cyc[i]);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+ /* In T5 the granularity of the total wait is too fine.
+ * It is not useful as it reaches the max value too fast.
+		 * Hence display the input FIFO wait only from T6 onwards.
+ */
+ seq_printf(seq, "%13s %10s %20s\n",
+ " ", "Total wait", "Total Occupancy");
+ seq_printf(seq, "Tx FIFO wait %10u %20llu\n",
+ tx_cnt[i], tx_cyc[i]);
+ seq_printf(seq, "Rx FIFO wait %10u %20llu\n",
+ rx_cnt[i], rx_cyc[i]);
+
+		/* Skip index 6 as there is nothing useful here */
+ i += 2;
+
+ /* At index 7, a new stat for read latency (count, total wait)
+ * is added.
+ */
+ seq_printf(seq, "%13s %10s %20s\n",
+ " ", "Reads", "Total wait");
+ seq_printf(seq, "Tx latency %10u %20llu\n",
+ tx_cnt[i], tx_cyc[i]);
+ seq_printf(seq, "Rx latency %10u %20llu\n",
+ rx_cnt[i], rx_cyc[i]);
+ }
return 0;
}
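
On T6 the firmware exposes extra PM stats entries past the common T4/T5 set, which is why the loop stops at PM_NSTATS - 1 and the tail is walked by hand. The layout this hunk assumes, described relative to i rather than by absolute index:

	/* Assumed T6 tail of the PM stats arrays, with i left at the first
	 * entry past the common T4/T5 stats:
	 *
	 *	[i]		input FIFO wait (count, total occupancy)
	 *	[i + 1]		skipped - nothing useful is exported
	 *	[i + 2]		read latency (reads, total wait)
	 */
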
@@ -1559,25 +1585,35 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
{
struct adapter *adap = seq->private;
unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
-
if (v == SEQ_START_TOKEN) {
- if (adap->params.arch.mps_rplc_size > 128)
+ if (chip_ver > CHELSIO_T5) {
seq_puts(seq, "Idx Ethernet address Mask "
+ " VNI Mask IVLAN Vld "
+ "DIP_Hit Lookup Port "
"Vld Ports PF VF "
"Replication "
" P0 P1 P2 P3 ML\n");
- else
- seq_puts(seq, "Idx Ethernet address Mask "
- "Vld Ports PF VF Replication"
- " P0 P1 P2 P3 ML\n");
+ } else {
+ if (adap->params.arch.mps_rplc_size > 128)
+ seq_puts(seq, "Idx Ethernet address Mask "
+ "Vld Ports PF VF "
+ "Replication "
+ " P0 P1 P2 P3 ML\n");
+ else
+ seq_puts(seq, "Idx Ethernet address Mask "
+ "Vld Ports PF VF Replication"
+ " P0 P1 P2 P3 ML\n");
+ }
} else {
u64 mask;
u8 addr[ETH_ALEN];
- bool replicate;
+ bool replicate, dip_hit = false, vlan_vld = false;
unsigned int idx = (uintptr_t)v - 2;
u64 tcamy, tcamx, val;
- u32 cls_lo, cls_hi, ctl;
+ u32 cls_lo, cls_hi, ctl, data2, vnix = 0, vniy = 0;
u32 rplc[8] = {0};
+ u8 lookup_type = 0, port_num = 0;
+ u16 ivlan = 0;
if (chip_ver > CHELSIO_T5) {
/* CtlCmdType - 0: Read, 1: Write
@@ -1596,6 +1632,22 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
tcamy = DMACH_G(val) << 32;
tcamy |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
+ data2 = t4_read_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A);
+ lookup_type = DATALKPTYPE_G(data2);
+ /* 0 - Outer header, 1 - Inner header
+ * [71:48] bit locations are overloaded for
+ * outer vs. inner lookup types.
+ */
+ if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
+ /* Inner header VNI */
+ vniy = ((data2 & DATAVIDH2_F) << 23) |
+ (DATAVIDH1_G(data2) << 16) | VIDL_G(val);
+ dip_hit = data2 & DATADIPHIT_F;
+ } else {
+ vlan_vld = data2 & DATAVIDH2_F;
+ ivlan = VIDL_G(val);
+ }
+ port_num = DATAPORTNUM_G(data2);
/* Read tcamx. Change the control param */
ctl |= CTLXYBITSEL_V(1);
@@ -1603,6 +1655,12 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
tcamx = DMACH_G(val) << 32;
tcamx |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
+ data2 = t4_read_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A);
+ if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
+ /* Inner header VNI mask */
+ vnix = ((data2 & DATAVIDH2_F) << 23) |
+ (DATAVIDH1_G(data2) << 16) | VIDL_G(val);
+ }
} else {
tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
@@ -1662,17 +1720,47 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
}
tcamxy2valmask(tcamx, tcamy, addr, &mask);
- if (chip_ver > CHELSIO_T5)
- seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
- "%012llx%3c %#x%4u%4d",
- idx, addr[0], addr[1], addr[2], addr[3],
- addr[4], addr[5], (unsigned long long)mask,
- (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
- PORTMAP_G(cls_hi),
- T6_PF_G(cls_lo),
- (cls_lo & T6_VF_VALID_F) ?
- T6_VF_G(cls_lo) : -1);
- else
+ if (chip_ver > CHELSIO_T5) {
+ /* Inner header lookup */
+ if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
+ seq_printf(seq,
+ "%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012llx %06x %06x - - %3c"
+ " %3c %4x "
+ "%3c %#x%4u%4d", idx, addr[0],
+ addr[1], addr[2], addr[3],
+ addr[4], addr[5],
+ (unsigned long long)mask,
+ vniy, vnix, dip_hit ? 'Y' : 'N',
+ lookup_type ? 'I' : 'O', port_num,
+ (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
+ PORTMAP_G(cls_hi),
+ T6_PF_G(cls_lo),
+ (cls_lo & T6_VF_VALID_F) ?
+ T6_VF_G(cls_lo) : -1);
+ } else {
+ seq_printf(seq,
+ "%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012llx - - ",
+ idx, addr[0], addr[1], addr[2],
+ addr[3], addr[4], addr[5],
+ (unsigned long long)mask);
+
+ if (vlan_vld)
+ seq_printf(seq, "%4u Y ", ivlan);
+ else
+ seq_puts(seq, " - N ");
+
+ seq_printf(seq,
+ "- %3c %4x %3c %#x%4u%4d",
+ lookup_type ? 'I' : 'O', port_num,
+ (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
+ PORTMAP_G(cls_hi),
+ T6_PF_G(cls_lo),
+ (cls_lo & T6_VF_VALID_F) ?
+ T6_VF_G(cls_lo) : -1);
+ }
+ } else
seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
"%012llx%3c %#x%4u%4d",
idx, addr[0], addr[1], addr[2], addr[3],
@@ -2245,7 +2333,7 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
{
struct adapter *adap = seq->private;
int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
- int iscsi_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
+ int iscsi_entries = DIV_ROUND_UP(adap->sge.iscsiqsets, 4);
int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
@@ -2331,10 +2419,10 @@ do { \
} else if (iscsi_idx < iscsi_entries) {
const struct sge_ofld_rxq *rx =
- &adap->sge.ofldrxq[iscsi_idx * 4];
+ &adap->sge.iscsirxq[iscsi_idx * 4];
const struct sge_ofld_txq *tx =
&adap->sge.ofldtxq[iscsi_idx * 4];
- int n = min(4, adap->sge.ofldqsets - 4 * iscsi_idx);
+ int n = min(4, adap->sge.iscsiqsets - 4 * iscsi_idx);
S("QType:", "iSCSI");
T("TxQ ID:", q.cntxt_id);
@@ -2454,7 +2542,7 @@ do { \
static int sge_queue_entries(const struct adapter *adap)
{
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
- DIV_ROUND_UP(adap->sge.ofldqsets, 4) +
+ DIV_ROUND_UP(adap->sge.iscsiqsets, 4) +
DIV_ROUND_UP(adap->sge.rdmaqs, 4) +
DIV_ROUND_UP(adap->sge.rdmaciqs, 4) +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 2a61a42..7a0b92b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -686,7 +686,7 @@ static int set_pauseparam(struct net_device *dev,
if (epause->tx_pause)
lc->requested_fc |= PAUSE_TX;
if (netif_running(dev))
- return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
+ return t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan,
lc);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 8490c84..b8a5fb0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -162,19 +162,8 @@ MODULE_FIRMWARE(FW6_FNAME);
static uint force_init;
module_param(force_init, uint, 0644);
-MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
-
-/*
- * Normally if the firmware we connect to has Configuration File support, we
- * use that and only fall back to the old Driver-based initialization if the
- * Configuration File fails for some reason. If force_old_init is set, then
- * we'll always use the old Driver-based initialization sequence.
- */
-static uint force_old_init;
-
-module_param(force_old_init, uint, 0644);
-MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated"
- " parameter");
+MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter, "
+		 "deprecated parameter");
static int dflt_msg_enable = DFLT_MSG_ENABLE;
@@ -196,23 +185,6 @@ module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
/*
- * Queue interrupt hold-off timer values. Queues default to the first of these
- * upon creation.
- */
-static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
-
-module_param_array(intr_holdoff, uint, NULL, 0644);
-MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
- "0..4 in microseconds, deprecated parameter");
-
-static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
-
-module_param_array(intr_cnt, uint, NULL, 0644);
-MODULE_PARM_DESC(intr_cnt,
- "thresholds 1..3 for queue interrupt packet counters, "
- "deprecated parameter");
-
-/*
* Normally we tell the chip to deliver Ingress Packets into our DMA buffers
* offset by 2 bytes in order to have the IP headers line up on 4-byte
* boundaries. This is a requirement for many architectures which will throw
@@ -226,13 +198,7 @@ MODULE_PARM_DESC(intr_cnt,
*/
static int rx_dma_offset = 2;
-static bool vf_acls;
-
#ifdef CONFIG_PCI_IOV
-module_param(vf_acls, bool, 0644);
-MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
- "deprecated parameter");
-
/* Configure the number of PCI-E Virtual Function which are to be instantiated
* on SR-IOV Capable Physical Functions.
*/
@@ -253,12 +219,6 @@ module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
"Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
-static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;
-
-module_param(tp_vlan_pri_map, uint, 0644);
-MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
- "deprecated parameter");
-
static struct dentry *cxgb4_debugfs_root;
static LIST_HEAD(adapter_list);
@@ -766,8 +726,8 @@ static void name_msix_vecs(struct adapter *adap)
}
/* offload queues */
- for_each_ofldrxq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
+ for_each_iscsirxq(&adap->sge, i)
+ snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d",
adap->port[0]->name, i);
for_each_rdmarxq(&adap->sge, i)
@@ -782,7 +742,7 @@ static void name_msix_vecs(struct adapter *adap)
static int request_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
+ int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
int msi_index = 2;
err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
@@ -799,11 +759,11 @@ static int request_msix_queue_irqs(struct adapter *adap)
goto unwind;
msi_index++;
}
- for_each_ofldrxq(s, ofldqidx) {
+ for_each_iscsirxq(s, iscsiqidx) {
err = request_irq(adap->msix_info[msi_index].vec,
t4_sge_intr_msix, 0,
adap->msix_info[msi_index].desc,
- &s->ofldrxq[ofldqidx].rspq);
+ &s->iscsirxq[iscsiqidx].rspq);
if (err)
goto unwind;
msi_index++;
@@ -835,9 +795,9 @@ unwind:
while (--rdmaqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->rdmarxq[rdmaqidx].rspq);
- while (--ofldqidx >= 0)
+ while (--iscsiqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
- &s->ofldrxq[ofldqidx].rspq);
+ &s->iscsirxq[iscsiqidx].rspq);
while (--ethqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->ethrxq[ethqidx].rspq);
@@ -853,8 +813,9 @@ static void free_msix_queue_irqs(struct adapter *adap)
free_irq(adap->msix_info[1].vec, &s->fw_evtq);
for_each_ethrxq(s, i)
free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
- for_each_ofldrxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
+ for_each_iscsirxq(s, i)
+ free_irq(adap->msix_info[msi_index++].vec,
+ &s->iscsirxq[i].rspq);
for_each_rdmarxq(s, i)
free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
for_each_rdmaciq(s, i)
@@ -1093,8 +1054,8 @@ freeout: t4_free_sge_resources(adap);
}
}
- j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
- for_each_ofldrxq(s, i) {
+	j = s->iscsiqsets / adap->params.nports; /* iSCSI queues per channel */
+ for_each_iscsirxq(s, i) {
err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
adap->port[i / j],
s->fw_evtq.cntxt_id);
@@ -1110,7 +1071,7 @@ freeout: t4_free_sge_resources(adap);
msi_idx += nq; \
} while (0)
- ALLOC_OFLD_RXQS(s->ofldrxq, s->ofldqsets, j, s->ofld_rxq);
+ ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq);
ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq);
j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq);
@@ -1505,7 +1466,7 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
else
stid = -1;
} else {
- stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
+ stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
if (stid < 0)
stid = -1;
}
@@ -1519,7 +1480,7 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
if (family == PF_INET)
t->stids_in_use++;
else
- t->stids_in_use += 4;
+ t->stids_in_use += 2;
}
spin_unlock_bh(&t->stid_lock);
return stid;
@@ -1570,13 +1531,13 @@ void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
if (family == PF_INET)
__clear_bit(stid, t->stid_bmap);
else
- bitmap_release_region(t->stid_bmap, stid, 2);
+ bitmap_release_region(t->stid_bmap, stid, 1);
t->stid_tab[stid].data = NULL;
if (stid < t->nstids) {
if (family == PF_INET)
t->stids_in_use--;
else
- t->stids_in_use -= 4;
+ t->stids_in_use -= 2;
} else {
t->sftids_in_use--;
}
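
An IPv6 server tid now consumes two stid entries instead of four, so allocation moves from bitmap_find_free_region() order 2 to order 1 and the usage counters step by 2. The order argument is log2 of the naturally aligned region being reserved:

	/* bitmap_find_free_region(bitmap, bits, order) reserves an aligned
	 * run of (1 << order) bits and must be paired with
	 * bitmap_release_region() at the same order:
	 *
	 *	order 1 -> 2 stids per IPv6 server (after this patch)
	 *	order 2 -> 4 stids per IPv6 server (before this patch)
	 *
	 * IPv4 servers keep taking a single bit via __set_bit().
	 */
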
@@ -2277,7 +2238,7 @@ static void disable_dbs(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
disable_txq_db(&adap->sge.ethtxq[i].q);
- for_each_ofldrxq(&adap->sge, i)
+ for_each_iscsirxq(&adap->sge, i)
disable_txq_db(&adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
disable_txq_db(&adap->sge.ctrlq[i].q);
@@ -2289,7 +2250,7 @@ static void enable_dbs(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
enable_txq_db(adap, &adap->sge.ethtxq[i].q);
- for_each_ofldrxq(&adap->sge, i)
+ for_each_iscsirxq(&adap->sge, i)
enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
enable_txq_db(adap, &adap->sge.ctrlq[i].q);
@@ -2359,7 +2320,7 @@ static void recover_all_queues(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
- for_each_ofldrxq(&adap->sge, i)
+ for_each_iscsirxq(&adap->sge, i)
sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
@@ -2443,10 +2404,10 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.nrxq = adap->sge.rdmaqs;
lli.nciq = adap->sge.rdmaciqs;
} else if (uld == CXGB4_ULD_ISCSI) {
- lli.rxq_ids = adap->sge.ofld_rxq;
- lli.nrxq = adap->sge.ofldqsets;
+ lli.rxq_ids = adap->sge.iscsi_rxq;
+ lli.nrxq = adap->sge.iscsiqsets;
}
- lli.ntxq = adap->sge.ofldqsets;
+ lli.ntxq = adap->sge.iscsiqsets;
lli.nchan = adap->params.nports;
lli.nports = adap->params.nports;
lli.wr_cred = adap->params.ofldq_wr_cred;
@@ -3140,16 +3101,6 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
if (ret < 0)
return ret;
- /* select capabilities we'll be using */
- if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
- if (!vf_acls)
- c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
- else
- c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
- } else if (vf_acls) {
- dev_err(adap->pdev_dev, "virtualization ACLs not supported");
- return ret;
- }
c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
@@ -4342,11 +4293,11 @@ static void cfg_queues(struct adapter *adap)
* capped by the number of available cores.
*/
if (n10g) {
- i = min_t(int, ARRAY_SIZE(s->ofldrxq),
+ i = min_t(int, ARRAY_SIZE(s->iscsirxq),
num_online_cpus());
- s->ofldqsets = roundup(i, adap->params.nports);
+ s->iscsiqsets = roundup(i, adap->params.nports);
} else
- s->ofldqsets = adap->params.nports;
+ s->iscsiqsets = adap->params.nports;
/* For RDMA one Rx queue per channel suffices */
s->rdmaqs = adap->params.nports;
/* Try and allow at least 1 CIQ per cpu rounding down
@@ -4377,8 +4328,8 @@ static void cfg_queues(struct adapter *adap)
for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
s->ofldtxq[i].q.size = 1024;
- for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
- struct sge_ofld_rxq *r = &s->ofldrxq[i];
+ for (i = 0; i < ARRAY_SIZE(s->iscsirxq); i++) {
+ struct sge_ofld_rxq *r = &s->iscsirxq[i];
init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
r->rspq.uld = CXGB4_ULD_ISCSI;
@@ -4459,7 +4410,7 @@ static int enable_msix(struct adapter *adap)
want = s->max_ethqsets + EXTRA_VECS;
if (is_offload(adap)) {
- want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
+ want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets;
/* need nchan for each possible ULD */
ofld_need = 3 * nchan;
}
@@ -4498,13 +4449,13 @@ static int enable_msix(struct adapter *adap)
/* leftovers go to OFLD */
i = allocated - EXTRA_VECS - s->max_ethqsets -
s->rdmaqs - s->rdmaciqs;
- s->ofldqsets = (i / nchan) * nchan; /* round down */
+ s->iscsiqsets = (i / nchan) * nchan; /* round down */
}
for (i = 0; i < allocated; ++i)
adap->msix_info[i].vec = entries[i].vector;
dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
"nic %d iscsi %d rdma cpl %d rdma ciq %d\n",
- allocated, s->max_ethqsets, s->ofldqsets, s->rdmaqs,
+ allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs,
s->rdmaciqs);
kfree(entries);
@@ -4532,6 +4483,79 @@ static int init_rss(struct adapter *adap)
return 0;
}
+static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ u32 lnkcap1, lnkcap2;
+ int err1, err2;
+
+#define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */
+
+ *speed = PCI_SPEED_UNKNOWN;
+ *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+ err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
+ &lnkcap1);
+ err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
+ &lnkcap2);
+ if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
+ *speed = PCIE_SPEED_8_0GT;
+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
+ *speed = PCIE_SPEED_5_0GT;
+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
+ *speed = PCIE_SPEED_2_5GT;
+ }
+ if (!err1) {
+ *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
+ if (!lnkcap2) { /* pre-r3.0 */
+ if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
+ *speed = PCIE_SPEED_5_0GT;
+ else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
+ *speed = PCIE_SPEED_2_5GT;
+ }
+ }
+
+ if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
+ return err1 ? err1 : err2 ? err2 : -EINVAL;
+ return 0;
+}
+
+static void cxgb4_check_pcie_caps(struct adapter *adap)
+{
+ enum pcie_link_width width, width_cap;
+ enum pci_bus_speed speed, speed_cap;
+
+#define PCIE_SPEED_STR(speed) \
+ (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
+ speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
+ speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
+ "Unknown")
+
+ if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
+ dev_warn(adap->pdev_dev,
+ "Unable to determine PCIe device BW capabilities\n");
+ return;
+ }
+
+ if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
+ speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
+ dev_warn(adap->pdev_dev,
+ "Unable to determine PCI Express bandwidth.\n");
+ return;
+ }
+
+ dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
+ PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
+ dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
+ width, width_cap);
+ if (speed < speed_cap || width < width_cap)
+ dev_info(adap->pdev_dev,
+ "A slot with more lanes and/or higher speed is "
+ "suggested for optimal performance.\n");
+}
+
static void print_port_info(const struct net_device *dev)
{
char buf[80];
@@ -4559,10 +4583,10 @@ static void print_port_info(const struct net_device *dev)
--bufp;
sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
- netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
+ netdev_info(dev, "Chelsio %s rev %d %s %sNIC %s\n",
adap->params.vpd.id,
CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
- is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
+ is_offload(adap) ? "R" : "",
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
netdev_info(dev, "S/N: %s, P/N: %s\n",
@@ -4781,8 +4805,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* configure SGE_STAT_CFG_A to read WC stats */
if (!is_t4(adapter->params.chip))
- t4_write_reg(adapter, SGE_STAT_CFG_A,
- STATSOURCE_T5_V(7) | STATMODE_V(0));
+ t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
+ (is_t5(adapter->params.chip) ? STATMODE_V(0) :
+ T6_STATMODE_V(0)));
for_each_port(adapter, i) {
struct net_device *netdev;
@@ -4908,6 +4933,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else if (msi > 0 && pci_enable_msi(pdev) == 0)
adapter->flags |= USING_MSI;
+ /* check for PCI Express bandwidth capabilities */
+ cxgb4_check_pcie_caps(adapter);
+
err = init_rss(adapter);
if (err)
goto out_free_dev;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 8d35ce3..b4eb468 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2288,7 +2288,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
if (likely(work_done < budget)) {
int timer_index;
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
if (q->adaptive_rx) {
@@ -2555,7 +2555,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->size = roundup(iq->size, 16);
iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
- &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
+ &iq->phys_addr, NULL, 0,
+ dev_to_node(adap->pdev_dev));
if (!iq->desc)
return -ENOMEM;
@@ -2595,7 +2596,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
fl->size = roundup(fl->size, 8);
fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
sizeof(struct rx_sw_desc), &fl->addr,
- &fl->sdesc, s->stat_len, NUMA_NO_NODE);
+ &fl->sdesc, s->stat_len,
+ dev_to_node(adap->pdev_dev));
if (!fl->desc)
goto fl_nomem;
@@ -2668,8 +2670,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
* simple (and hopefully less wrong).
*/
if (!is_t4(adap->params.chip) && cong >= 0) {
- u32 param, val;
+ u32 param, val, ch_map = 0;
int i;
+ u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
@@ -2681,9 +2684,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
for (i = 0; i < 4; i++) {
if (cong & (1 << i))
- val |=
- CONMCTXT_CNGCHMAP_V(1 << (i << 2));
+ ch_map |= 1 << (i << cng_ch_bits_log);
}
+ val |= CONMCTXT_CNGCHMAP_V(ch_map);
}
ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
&param, &val);
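
The cng_ch_bits_log shift introduced here spaces the per-channel congestion bits differently per chip: 4 map bits per channel on T4/T5 (log 2) versus 8 on T6 (log 3), matching the arch fields set later in t4_prep_adapter(). A quick check of the resulting maps, assuming cong = 0x5 (channels 0 and 2 congested):

#include <stdio.h>

static unsigned int build_ch_map(unsigned int cong, unsigned int bits_log)
{
	unsigned int i, ch_map = 0;

	for (i = 0; i < 4; i++)
		if (cong & (1 << i))
			ch_map |= 1 << (i << bits_log);
	return ch_map;
}

int main(void)
{
	printf("T4/T5: 0x%x\n", build_ch_map(0x5, 2));	/* 0x101 */
	printf("T6:    0x%x\n", build_ch_map(0x5, 3));	/* 0x10001 */
	return 0;
}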
@@ -2978,7 +2981,7 @@ void t4_free_sge_resources(struct adapter *adap)
}
/* clean up RDMA and iSCSI Rx queues */
- t4_free_ofld_rxqs(adap, adap->sge.ofldqsets, adap->sge.ofldrxq);
+ t4_free_ofld_rxqs(adap, adap->sge.iscsiqsets, adap->sge.iscsirxq);
t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
@@ -3171,8 +3174,7 @@ static int t4_sge_init_soft(struct adapter *adap)
int t4_sge_init(struct adapter *adap)
{
struct sge *s = &adap->sge;
- u32 sge_control, sge_control2, sge_conm_ctrl;
- unsigned int ingpadboundary, ingpackboundary;
+ u32 sge_control, sge_conm_ctrl;
int ret, egress_threshold;
/*
@@ -3183,35 +3185,7 @@ int t4_sge_init(struct adapter *adap)
s->pktshift = PKTSHIFT_G(sge_control);
s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
- /* T4 uses a single control field to specify both the PCIe Padding and
- * Packing Boundary. T5 introduced the ability to specify these
- * separately. The actual Ingress Packet Data alignment boundary
- * within Packed Buffer Mode is the maximum of these two
- * specifications. (Note that it makes no real practical sense to
- * have the Pading Boudary be larger than the Packing Boundary but you
- * could set the chip up that way and, in fact, legacy T4 code would
- * end doing this because it would initialize the Padding Boundary and
- * leave the Packing Boundary initialized to 0 (16 bytes).)
- */
- ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
- INGPADBOUNDARY_SHIFT_X);
- if (is_t4(adap->params.chip)) {
- s->fl_align = ingpadboundary;
- } else {
- /* T5 has a different interpretation of one of the PCIe Packing
- * Boundary values.
- */
- sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
- ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
- if (ingpackboundary == INGPACKBOUNDARY_16B_X)
- ingpackboundary = 16;
- else
- ingpackboundary = 1 << (ingpackboundary +
- INGPACKBOUNDARY_SHIFT_X);
-
- s->fl_align = max(ingpadboundary, ingpackboundary);
- }
-
+ s->fl_align = t4_fl_pkt_align(adap);
ret = t4_sge_init_soft(adap);
if (ret < 0)
return ret;
@@ -3229,10 +3203,21 @@ int t4_sge_init(struct adapter *adap)
* buffers.
*/
sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
- if (is_t4(adap->params.chip))
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T4:
egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
- else
+ break;
+ case CHELSIO_T5:
egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
+ break;
+ case CHELSIO_T6:
+ egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
+ break;
+ default:
+ dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
+ CHELSIO_CHIP_VERSION(adap->params.chip));
+ return -EINVAL;
+ }
s->fl_starve_thres = 2*egress_threshold + 1;
t4_idma_monitor_init(adap, &s->idma_monitor);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index cf61a58..636b469 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -1942,8 +1942,12 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x1190, 0x1194,
0x11a0, 0x11a4,
0x11b0, 0x11b4,
- 0x11fc, 0x1254,
- 0x1280, 0x133c,
+ 0x11fc, 0x1258,
+ 0x1280, 0x12d4,
+ 0x12d9, 0x12d9,
+ 0x12de, 0x12de,
+ 0x12e3, 0x12e3,
+ 0x12e8, 0x133c,
0x1800, 0x18fc,
0x3000, 0x302c,
0x3060, 0x30b0,
@@ -1973,7 +1977,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x5e50, 0x5e94,
0x5ea0, 0x5eb0,
0x5ec0, 0x5ec0,
- 0x5ec8, 0x5ecc,
+ 0x5ec8, 0x5ed0,
0x6000, 0x6020,
0x6028, 0x6040,
0x6058, 0x609c,
@@ -2048,7 +2052,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x19150, 0x19194,
0x1919c, 0x191b0,
0x191d0, 0x191e8,
- 0x19238, 0x192b0,
+ 0x19238, 0x19290,
+ 0x192a4, 0x192b0,
0x192bc, 0x192bc,
0x19348, 0x1934c,
0x193f8, 0x19418,
@@ -2442,7 +2447,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x40280, 0x40280,
0x40304, 0x40304,
0x40330, 0x4033c,
- 0x41304, 0x413c8,
+ 0x41304, 0x413b8,
+ 0x413c0, 0x413c8,
0x413d0, 0x413dc,
0x413f0, 0x413f0,
0x41400, 0x4140c,
@@ -5254,7 +5260,7 @@ void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
int i;
u32 data[2];
- for (i = 0; i < PM_NSTATS; i++) {
+ for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
if (is_t4(adap->params.chip)) {
@@ -5281,7 +5287,7 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
int i;
u32 data[2];
- for (i = 0; i < PM_NSTATS; i++) {
+ for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
if (is_t4(adap->params.chip)) {
@@ -5310,7 +5316,14 @@ unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
if (n == 0)
return idx == 0 ? 0xf : 0;
- if (n == 1)
+ /* In T6 (which is a 2-port card),
+ * port 0 is mapped to channel 0 and port 1 is mapped to channel 1.
+ * For a 2-port T4/T5 adapter,
+ * port 0 is mapped to channels 0 and 1,
+ * port 1 is mapped to channels 2 and 3.
+ */
+ if ((n == 1) &&
+ (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
return idx < 2 ? (3 << (2 * idx)) : 0;
return 1 << idx;
}
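
As a worked example of the mapping comment above: on a 2-port card (n == 1, i.e. log2 of the port count), the buffer-group map for port 1 is 0xc on T4/T5 (channels 2 and 3) but 0x2 on T6 (channel 1 only). A minimal sketch of the two rules:

#include <stdio.h>

static unsigned int bg_map_2port(int idx, int is_t6)
{
	if (is_t6)
		return 1 << idx;		/* port i -> channel i */
	return idx < 2 ? 3 << (2 * idx) : 0;	/* port i -> channels 2i, 2i+1 */
}

int main(void)
{
	printf("T5 port1: 0x%x, T6 port1: 0x%x\n",
	       bg_map_2port(1, 0), bg_map_2port(1, 1));	/* 0xc, 0x2 */
	return 0;
}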
@@ -5689,6 +5702,39 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
"IDMA_FL_SEND_PADDING",
"IDMA_FL_SEND_COMPLETION_TO_IMSG",
};
+ static const char * const t6_decode[] = {
+ "IDMA_IDLE",
+ "IDMA_PUSH_MORE_CPL_FIFO",
+ "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
+ "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
+ "IDMA_PHYSADDR_SEND_PCIEHDR",
+ "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
+ "IDMA_PHYSADDR_SEND_PAYLOAD",
+ "IDMA_FL_REQ_DATA_FL",
+ "IDMA_FL_DROP",
+ "IDMA_FL_DROP_SEND_INC",
+ "IDMA_FL_H_REQ_HEADER_FL",
+ "IDMA_FL_H_SEND_PCIEHDR",
+ "IDMA_FL_H_PUSH_CPL_FIFO",
+ "IDMA_FL_H_SEND_CPL",
+ "IDMA_FL_H_SEND_IP_HDR_FIRST",
+ "IDMA_FL_H_SEND_IP_HDR",
+ "IDMA_FL_H_REQ_NEXT_HEADER_FL",
+ "IDMA_FL_H_SEND_NEXT_PCIEHDR",
+ "IDMA_FL_H_SEND_IP_HDR_PADDING",
+ "IDMA_FL_D_SEND_PCIEHDR",
+ "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
+ "IDMA_FL_D_REQ_NEXT_DATA_FL",
+ "IDMA_FL_SEND_PCIEHDR",
+ "IDMA_FL_PUSH_CPL_FIFO",
+ "IDMA_FL_SEND_CPL",
+ "IDMA_FL_SEND_PAYLOAD_FIRST",
+ "IDMA_FL_SEND_PAYLOAD",
+ "IDMA_FL_REQ_NEXT_DATA_FL",
+ "IDMA_FL_SEND_NEXT_PCIEHDR",
+ "IDMA_FL_SEND_PADDING",
+ "IDMA_FL_SEND_COMPLETION_TO_IMSG",
+ };
static const u32 sge_regs[] = {
SGE_DEBUG_DATA_LOW_INDEX_2_A,
SGE_DEBUG_DATA_LOW_INDEX_3_A,
@@ -5697,6 +5743,32 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
const char **sge_idma_decode;
int sge_idma_decode_nstates;
int i;
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
+
+ /* Select the right set of decode strings to dump depending on the
+ * adapter chip type.
+ */
+ switch (chip_version) {
+ case CHELSIO_T4:
+ sge_idma_decode = (const char **)t4_decode;
+ sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
+ break;
+
+ case CHELSIO_T5:
+ sge_idma_decode = (const char **)t5_decode;
+ sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
+ break;
+
+ case CHELSIO_T6:
+ sge_idma_decode = (const char **)t6_decode;
+ sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
+ break;
+
+ default:
+ dev_err(adapter->pdev_dev,
+ "Unsupported chip version %d\n", chip_version);
+ return;
+ }
if (is_t4(adapter->params.chip)) {
sge_idma_decode = (const char **)t4_decode;
@@ -6097,6 +6169,59 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
}
/**
+ * t4_fl_pkt_align - return the fl packet alignment
+ * @adap: the adapter
+ *
+ * T4 has a single field to specify the packing and padding boundary.
+ * T5 onwards has separate fields for this, and hence the alignment for
+ * the next packet offset is the maximum of these two.
+ *
+ */
+int t4_fl_pkt_align(struct adapter *adap)
+{
+ u32 sge_control, sge_control2;
+ unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
+
+ sge_control = t4_read_reg(adap, SGE_CONTROL_A);
+
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications. (Note that it makes no real practical sense to
+ * have the Padding Boundary be larger than the Packing Boundary but you
+ * could set the chip up that way and, in fact, legacy T4 code would
+ * end up doing this because it would initialize the Padding Boundary and
+ * leave the Packing Boundary initialized to 0 (16 bytes).)
+ * Padding Boundary values in T6 start from 8B,
+ * whereas it is 32B for T4 and T5.
+ */
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ ingpad_shift = INGPADBOUNDARY_SHIFT_X;
+ else
+ ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
+
+ ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
+
+ fl_align = ingpadboundary;
+ if (!is_t4(adap->params.chip)) {
+ /* T5 has a weird interpretation of one of the PCIe Packing
+ * Boundary values. No idea why ...
+ */
+ sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
+ ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
+ if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ INGPACKBOUNDARY_SHIFT_X);
+
+ fl_align = max(ingpadboundary, ingpackboundary);
+ }
+ return fl_align;
+}
+
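A numeric sketch of the rule t4_fl_pkt_align() implements; the register field values below are invented for illustration. The padding boundary is encoded as 1 << (field + shift), with shift 5 on T4/T5 and shift 3 on T6:

#include <stdio.h>

int main(void)
{
	unsigned int pad_field = 2, pack_field = 1;	/* hypothetical fields */
	unsigned int t5_pad = 1 << (pad_field + 5);	/* 128B */
	unsigned int t6_pad = 1 << (pad_field + 3);	/* 32B */
	unsigned int pack   = 1 << (pack_field + 5);	/* 64B */

	printf("T5 fl_align=%u, T6 fl_align=%u\n",
	       t5_pad > pack ? t5_pad : pack,		/* max(pad, pack) = 128 */
	       t6_pad > pack ? t6_pad : pack);		/* max(pad, pack) = 64 */
	return 0;
}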
+/**
* t4_fixup_host_params - fix up host-dependent parameters
* @adap: the adapter
* @page_size: the host's Base Page Size
@@ -6114,6 +6239,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
unsigned int fl_align_log = fls(fl_align) - 1;
+ unsigned int ingpad;
t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
HOSTPAGESIZEPF0_V(sge_hps) |
@@ -6161,10 +6287,16 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
fl_align = 64;
fl_align_log = 6;
}
+
+ if (is_t5(adap->params.chip))
+ ingpad = INGPCIEBOUNDARY_32B_X;
+ else
+ ingpad = T6_INGPADBOUNDARY_32B_X;
+
t4_set_reg_field(adap, SGE_CONTROL_A,
INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
EGRSTATUSPAGESIZE_F,
- INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) |
+ INGPADBOUNDARY_V(ingpad) |
EGRSTATUSPAGESIZE_V(stat_len != 64));
t4_set_reg_field(adap, SGE_CONTROL2_A,
INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
@@ -7060,7 +7192,12 @@ int t4_prep_adapter(struct adapter *adapter)
NUM_MPS_CLS_SRAM_L_INSTANCES;
adapter->params.arch.mps_rplc_size = 128;
adapter->params.arch.nchan = NCHAN;
+ adapter->params.arch.pm_stats_cnt = PM_NSTATS;
adapter->params.arch.vfcount = 128;
+ /* Congestion map is for 4 channels so that
+ * MPS can have 4 priorities per port.
+ */
+ adapter->params.arch.cng_ch_bits_log = 2;
break;
case CHELSIO_T5:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
@@ -7069,7 +7206,9 @@ int t4_prep_adapter(struct adapter *adapter)
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
adapter->params.arch.mps_rplc_size = 128;
adapter->params.arch.nchan = NCHAN;
+ adapter->params.arch.pm_stats_cnt = PM_NSTATS;
adapter->params.arch.vfcount = 128;
+ adapter->params.arch.cng_ch_bits_log = 2;
break;
case CHELSIO_T6:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
@@ -7078,7 +7217,12 @@ int t4_prep_adapter(struct adapter *adapter)
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
adapter->params.arch.mps_rplc_size = 256;
adapter->params.arch.nchan = 2;
+ adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
adapter->params.arch.vfcount = 256;
+ /* Congestion map is for 2 channels so that
+ * MPS can have 8 priorities per port.
+ */
+ adapter->params.arch.cng_ch_bits_log = 3;
break;
default:
dev_err(adapter->pdev_dev, "Device %d is not supported\n",
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 13708fd..2fc60e8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -48,6 +48,7 @@ enum {
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
PM_NSTATS = 5, /* # of PM stats */
+ T6_PM_NSTATS = 7, /* # of PM stats in T6 */
MBOX_LEN = 64, /* mailbox size in bytes */
TRACE_LEN = 112, /* length of trace data and mask */
FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 91b52a2..9fea255 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -492,6 +492,9 @@
#define STATSOURCE_T5_V(x) ((x) << STATSOURCE_T5_S)
#define STATSOURCE_T5_G(x) (((x) >> STATSOURCE_T5_S) & STATSOURCE_T5_M)
+#define T6_STATMODE_S 0
+#define T6_STATMODE_V(x) ((x) << T6_STATMODE_S)
+
#define SGE_DBFIFO_STATUS2_A 0x1118
#define HP_INT_THRESH_T5_S 10
@@ -2395,6 +2398,30 @@
#define MPS_CLS_TCAM_DATA0_A 0xf000
#define MPS_CLS_TCAM_DATA1_A 0xf004
+#define VIDL_S 16
+#define VIDL_M 0xffffU
+#define VIDL_G(x) (((x) >> VIDL_S) & VIDL_M)
+
+#define DATALKPTYPE_S 10
+#define DATALKPTYPE_M 0x3U
+#define DATALKPTYPE_G(x) (((x) >> DATALKPTYPE_S) & DATALKPTYPE_M)
+
+#define DATAPORTNUM_S 12
+#define DATAPORTNUM_M 0xfU
+#define DATAPORTNUM_G(x) (((x) >> DATAPORTNUM_S) & DATAPORTNUM_M)
+
+#define DATADIPHIT_S 8
+#define DATADIPHIT_V(x) ((x) << DATADIPHIT_S)
+#define DATADIPHIT_F DATADIPHIT_V(1U)
+
+#define DATAVIDH2_S 7
+#define DATAVIDH2_V(x) ((x) << DATAVIDH2_S)
+#define DATAVIDH2_F DATAVIDH2_V(1U)
+
+#define DATAVIDH1_S 0
+#define DATAVIDH1_M 0x7fU
+#define DATAVIDH1_G(x) (((x) >> DATAVIDH1_S) & DATAVIDH1_M)
+
#define USED_S 16
#define USED_M 0x7ffU
#define USED_G(x) (((x) >> USED_S) & USED_M)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
index 7bdee3b..a5231fa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
@@ -53,6 +53,9 @@
#define INGPADBOUNDARY_SHIFT_X 5
+#define T6_INGPADBOUNDARY_SHIFT_X 3
+#define T6_INGPADBOUNDARY_32B_X 2
+
/* CONTROL2 register */
#define INGPACKBOUNDARY_SHIFT_X 5
#define INGPACKBOUNDARY_16B_X 0
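
A sanity check on these encodings (assuming INGPCIEBOUNDARY_32B_X is 0, which is not shown in this hunk): since the boundary is 1 << (field + shift), both expressions below yield 32 bytes.

#include <stdio.h>

int main(void)
{
	/* T4/T5: shift 5, field 0 (INGPCIEBOUNDARY_32B_X, assumed 0) */
	printf("T4/T5 boundary: %d bytes\n", 1 << (0 + 5));
	/* T6: shift 3, field 2 (T6_INGPADBOUNDARY_32B_X) */
	printf("T6 boundary:    %d bytes\n", 1 << (2 + 3));
	return 0;
}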
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index fa3786a..6528231 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2607,7 +2607,7 @@ int t4vf_sge_init(struct adapter *adapter)
u32 fl0 = sge_params->sge_fl_buffer_size[0];
u32 fl1 = sge_params->sge_fl_buffer_size[1];
struct sge *s = &adapter->sge;
- unsigned int ingpadboundary, ingpackboundary;
+ unsigned int ingpadboundary, ingpackboundary, ingpad_shift;
/*
* Start by vetting the basic SGE parameters which have been set up by
@@ -2642,9 +2642,16 @@ int t4vf_sge_init(struct adapter *adapter)
* could set the chip up that way and, in fact, legacy T4 code would
* end doing this because it would initialize the Padding Boundary and
* leave the Packing Boundary initialized to 0 (16 bytes).)
+ * Padding Boundary values in T6 start from 8B,
+ * whereas it is 32B for T4 and T5.
*/
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ ingpad_shift = INGPADBOUNDARY_SHIFT_X;
+ else
+ ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
+
ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) +
- INGPADBOUNDARY_SHIFT_X);
+ ingpad_shift);
if (is_t4(adapter->params.chip)) {
s->fl_align = ingpadboundary;
} else {
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
index b516b12..f859db3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
@@ -54,6 +54,7 @@
#define T4VF_MPS_BASE_ADDR 0x0100
#define T4VF_PL_BASE_ADDR 0x0200
#define T4VF_MBDATA_BASE_ADDR 0x0240
+#define T6VF_MBDATA_BASE_ADDR 0x0280
#define T4VF_CIM_BASE_ADDR 0x0300
#define T4VF_REGMAP_START 0x0000
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 63dd5fd..b6fa74a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -120,12 +120,19 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
1, 1, 3, 5, 10, 10, 20, 50, 100
};
- u32 v;
+ u32 v, mbox_data;
int i, ms, delay_idx;
const __be64 *p;
- u32 mbox_data = T4VF_MBDATA_BASE_ADDR;
u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
+ /* In T6, the mailbox size was changed to 128 bytes to avoid
+ * invalidating the entire prefetch buffer.
+ */
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ mbox_data = T4VF_MBDATA_BASE_ADDR;
+ else
+ mbox_data = T6VF_MBDATA_BASE_ADDR;
+
/*
* Commands must be multiples of 16 bytes in length and may not be
* larger than the size of the Mailbox Data register array.
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index d463563..22bf7af 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -37,7 +37,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "10.6.0.3"
+#define DRV_VER "11.0.0.0"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -521,7 +521,7 @@ struct be_adapter {
struct be_drv_stats drv_stats;
struct be_aic_obj aic_obj[MAX_EVT_QS];
u8 vlan_prio_bmap; /* Available Priority BitMap */
- u16 recommended_prio; /* Recommended Priority */
+ u16 recommended_prio_bits; /* Recommended Priority bits in vlan tag */
struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
struct be_dma_mem stats_cmd;
@@ -547,10 +547,6 @@ struct be_adapter {
u32 beacon_state; /* for set_phys_id */
- bool eeh_error;
- bool fw_timeout;
- bool hw_error;
-
u32 port_num;
char port_name;
u8 mc_type;
@@ -574,6 +570,8 @@ struct be_adapter {
struct be_resources pool_res; /* resources available for the port */
struct be_resources res; /* resources available for the func */
u16 num_vfs; /* Number of VFs provisioned by PF */
+ u8 pf_num; /* Numbering used by FW, starts at 0 */
+ u8 vf_num; /* Numbering used by FW, starts at 1 */
u8 virtfn;
struct be_vf_cfg *vf_cfg;
bool be3_native;
@@ -591,11 +589,10 @@ struct be_adapter {
u32 msg_enable;
int be_get_temp_freq;
struct be_hwmon hwmon_info;
- u8 pf_number;
- u8 pci_func_num;
struct rss_info rss_info;
/* Filters for packets that need to be sent to BMC */
u32 bmc_filt_mask;
+ u32 fat_dump_len;
u16 serial_num[CNTL_SERIAL_NUM_WORDS];
};
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1795c93..b63d8ad 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -308,8 +308,7 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
if (evt->valid) {
adapter->vlan_prio_bmap = evt->available_priority_bmap;
- adapter->recommended_prio &= ~VLAN_PRIO_MASK;
- adapter->recommended_prio =
+ adapter->recommended_prio_bits =
evt->reco_default_priority << VLAN_PRIO_SHIFT;
}
}
@@ -1713,49 +1712,40 @@ err:
}
/* Uses synchronous mcc */
-int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
+int be_cmd_get_fat_dump_len(struct be_adapter *adapter, u32 *dump_size)
{
- struct be_mcc_wrb *wrb;
+ struct be_mcc_wrb wrb = {0};
struct be_cmd_req_get_fat *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
+ req = embedded_payload(&wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
- NULL);
+ OPCODE_COMMON_MANAGE_FAT, sizeof(*req),
+ &wrb, NULL);
req->fat_operation = cpu_to_le32(QUERY_FAT);
- status = be_mcc_notify_wait(adapter);
+ status = be_cmd_notify_wait(adapter, &wrb);
if (!status) {
- struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
+ struct be_cmd_resp_get_fat *resp = embedded_payload(&wrb);
- if (log_size && resp->log_size)
- *log_size = le32_to_cpu(resp->log_size) -
+ if (dump_size && resp->log_size)
+ *dump_size = le32_to_cpu(resp->log_size) -
sizeof(u32);
}
-err:
- spin_unlock_bh(&adapter->mcc_lock);
return status;
}
-int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
+int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
{
struct be_dma_mem get_fat_cmd;
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_fat *req;
u32 offset = 0, total_size, buf_size,
log_offset = sizeof(u32), payload_len;
- int status = 0;
+ int status;
if (buf_len == 0)
- return -EIO;
+ return 0;
total_size = buf_len;
@@ -1763,11 +1753,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
get_fat_cmd.size,
&get_fat_cmd.dma, GFP_ATOMIC);
- if (!get_fat_cmd.va) {
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure while reading FAT data\n");
+ if (!get_fat_cmd.va)
return -ENOMEM;
- }
spin_lock_bh(&adapter->mcc_lock);
@@ -2291,10 +2278,11 @@ err:
return status;
}
-int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset,
- const char *obj_name, u32 *data_written,
- u8 *change_status, u8 *addn_status)
+static int lancer_cmd_write_object(struct be_adapter *adapter,
+ struct be_dma_mem *cmd, u32 data_size,
+ u32 data_offset, const char *obj_name,
+ u32 *data_written, u8 *change_status,
+ u8 *addn_status)
{
struct be_mcc_wrb *wrb;
struct lancer_cmd_req_write_object *req;
@@ -2410,7 +2398,8 @@ int be_cmd_query_sfp_info(struct be_adapter *adapter)
return status;
}
-int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
+static int lancer_cmd_delete_object(struct be_adapter *adapter,
+ const char *obj_name)
{
struct lancer_cmd_req_delete_object *req;
struct be_mcc_wrb *wrb;
@@ -2485,9 +2474,9 @@ err_unlock:
return status;
}
-int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 flash_type, u32 flash_opcode, u32 img_offset,
- u32 buf_size)
+static int be_cmd_write_flashrom(struct be_adapter *adapter,
+ struct be_dma_mem *cmd, u32 flash_type,
+ u32 flash_opcode, u32 img_offset, u32 buf_size)
{
struct be_mcc_wrb *wrb;
struct be_cmd_write_flashrom *req;
@@ -2533,8 +2522,8 @@ err_unlock:
return status;
}
-int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
- u16 img_optype, u32 img_offset, u32 crc_offset)
+static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
+ u16 img_optype, u32 img_offset, u32 crc_offset)
{
struct be_cmd_read_flash_crc *req;
struct be_mcc_wrb *wrb;
@@ -2571,6 +2560,579 @@ err:
return status;
}
+static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
+
+static bool phy_flashing_required(struct be_adapter *adapter)
+{
+ return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
+ adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
+}
+
+static bool is_comp_in_ufi(struct be_adapter *adapter,
+ struct flash_section_info *fsec, int type)
+{
+ int i = 0, img_type = 0;
+ struct flash_section_info_g2 *fsec_g2 = NULL;
+
+ if (BE2_chip(adapter))
+ fsec_g2 = (struct flash_section_info_g2 *)fsec;
+
+ for (i = 0; i < MAX_FLASH_COMP; i++) {
+ if (fsec_g2)
+ img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
+ else
+ img_type = le32_to_cpu(fsec->fsec_entry[i].type);
+
+ if (img_type == type)
+ return true;
+ }
+ return false;
+}
+
+static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
+ int header_size,
+ const struct firmware *fw)
+{
+ struct flash_section_info *fsec = NULL;
+ const u8 *p = fw->data;
+
+ p += header_size;
+ while (p < (fw->data + fw->size)) {
+ fsec = (struct flash_section_info *)p;
+ if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
+ return fsec;
+ p += 32;
+ }
+ return NULL;
+}
+
+static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
+ u32 img_offset, u32 img_size, int hdr_size,
+ u16 img_optype, bool *crc_match)
+{
+ u32 crc_offset;
+ int status;
+ u8 crc[4];
+
+ status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
+ img_size - 4);
+ if (status)
+ return status;
+
+ crc_offset = hdr_size + img_offset + img_size - 4;
+
+ /* Skip flashing if the CRC of the flashed region matches */
+ if (!memcmp(crc, p + crc_offset, 4))
+ *crc_match = true;
+ else
+ *crc_match = false;
+
+ return status;
+}
+
+static int be_flash(struct be_adapter *adapter, const u8 *img,
+ struct be_dma_mem *flash_cmd, int optype, int img_size,
+ u32 img_offset)
+{
+ u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
+ struct be_cmd_write_flashrom *req = flash_cmd->va;
+ int status;
+
+ while (total_bytes) {
+ num_bytes = min_t(u32, 32 * 1024, total_bytes);
+
+ total_bytes -= num_bytes;
+
+ if (!total_bytes) {
+ if (optype == OPTYPE_PHY_FW)
+ flash_op = FLASHROM_OPER_PHY_FLASH;
+ else
+ flash_op = FLASHROM_OPER_FLASH;
+ } else {
+ if (optype == OPTYPE_PHY_FW)
+ flash_op = FLASHROM_OPER_PHY_SAVE;
+ else
+ flash_op = FLASHROM_OPER_SAVE;
+ }
+
+ memcpy(req->data_buf, img, num_bytes);
+ img += num_bytes;
+ status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
+ flash_op, img_offset +
+ bytes_sent, num_bytes);
+ if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
+ optype == OPTYPE_PHY_FW)
+ break;
+ else if (status)
+ return status;
+
+ bytes_sent += num_bytes;
+ }
+ return 0;
+}
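
be_flash() above streams the image in 32KB pieces: every chunk but the last is sent with a SAVE opcode, and only the final one uses the FLASH (or PHY_FLASH) opcode that commits the write. A userspace sketch of that chunking policy, with a made-up image size:

#include <stdio.h>

#define CHUNK (32 * 1024)

int main(void)
{
	unsigned int left = 100 * 1024, n;	/* hypothetical image size */

	while (left) {
		n = left < CHUNK ? left : CHUNK;
		left -= n;
		printf("send %6u bytes, op=%s\n", n, left ? "SAVE" : "FLASH");
	}
	return 0;
}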
+
+/* For BE2, BE3 and BE3-R */
+static int be_flash_BEx(struct be_adapter *adapter,
+ const struct firmware *fw,
+ struct be_dma_mem *flash_cmd, int num_of_images)
+{
+ int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
+ struct device *dev = &adapter->pdev->dev;
+ struct flash_section_info *fsec = NULL;
+ int status, i, filehdr_size, num_comp;
+ const struct flash_comp *pflashcomp;
+ bool crc_match;
+ const u8 *p;
+
+ struct flash_comp gen3_flash_types[] = {
+ { BE3_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE,
+ BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI},
+ { BE3_REDBOOT_START, OPTYPE_REDBOOT,
+ BE3_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE},
+ { BE3_ISCSI_BIOS_START, OPTYPE_BIOS,
+ BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI},
+ { BE3_PXE_BIOS_START, OPTYPE_PXE_BIOS,
+ BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE},
+ { BE3_FCOE_BIOS_START, OPTYPE_FCOE_BIOS,
+ BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE},
+ { BE3_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP,
+ BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI},
+ { BE3_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE,
+ BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE},
+ { BE3_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP,
+ BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE},
+ { BE3_NCSI_START, OPTYPE_NCSI_FW,
+ BE3_NCSI_COMP_MAX_SIZE, IMAGE_NCSI},
+ { BE3_PHY_FW_START, OPTYPE_PHY_FW,
+ BE3_PHY_FW_COMP_MAX_SIZE, IMAGE_FIRMWARE_PHY}
+ };
+
+ struct flash_comp gen2_flash_types[] = {
+ { BE2_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE,
+ BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI},
+ { BE2_REDBOOT_START, OPTYPE_REDBOOT,
+ BE2_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE},
+ { BE2_ISCSI_BIOS_START, OPTYPE_BIOS,
+ BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI},
+ { BE2_PXE_BIOS_START, OPTYPE_PXE_BIOS,
+ BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE},
+ { BE2_FCOE_BIOS_START, OPTYPE_FCOE_BIOS,
+ BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE},
+ { BE2_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP,
+ BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI},
+ { BE2_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE,
+ BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE},
+ { BE2_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP,
+ BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE}
+ };
+
+ if (BE3_chip(adapter)) {
+ pflashcomp = gen3_flash_types;
+ filehdr_size = sizeof(struct flash_file_hdr_g3);
+ num_comp = ARRAY_SIZE(gen3_flash_types);
+ } else {
+ pflashcomp = gen2_flash_types;
+ filehdr_size = sizeof(struct flash_file_hdr_g2);
+ num_comp = ARRAY_SIZE(gen2_flash_types);
+ img_hdrs_size = 0;
+ }
+
+ /* Get flash section info */
+ fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
+ if (!fsec) {
+ dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
+ return -1;
+ }
+ for (i = 0; i < num_comp; i++) {
+ if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
+ continue;
+
+ if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
+ memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
+ continue;
+
+ if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
+ !phy_flashing_required(adapter))
+ continue;
+
+ if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
+ status = be_check_flash_crc(adapter, fw->data,
+ pflashcomp[i].offset,
+ pflashcomp[i].size,
+ filehdr_size +
+ img_hdrs_size,
+ OPTYPE_REDBOOT, &crc_match);
+ if (status) {
+ dev_err(dev,
+ "Could not get CRC for 0x%x region\n",
+ pflashcomp[i].optype);
+ continue;
+ }
+
+ if (crc_match)
+ continue;
+ }
+
+ p = fw->data + filehdr_size + pflashcomp[i].offset +
+ img_hdrs_size;
+ if (p + pflashcomp[i].size > fw->data + fw->size)
+ return -1;
+
+ status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
+ pflashcomp[i].size, 0);
+ if (status) {
+ dev_err(dev, "Flashing section type 0x%x failed\n",
+ pflashcomp[i].img_type);
+ return status;
+ }
+ }
+ return 0;
+}
+
+static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
+{
+ u32 img_type = le32_to_cpu(fsec_entry.type);
+ u16 img_optype = le16_to_cpu(fsec_entry.optype);
+
+ if (img_optype != 0xFFFF)
+ return img_optype;
+
+ switch (img_type) {
+ case IMAGE_FIRMWARE_ISCSI:
+ img_optype = OPTYPE_ISCSI_ACTIVE;
+ break;
+ case IMAGE_BOOT_CODE:
+ img_optype = OPTYPE_REDBOOT;
+ break;
+ case IMAGE_OPTION_ROM_ISCSI:
+ img_optype = OPTYPE_BIOS;
+ break;
+ case IMAGE_OPTION_ROM_PXE:
+ img_optype = OPTYPE_PXE_BIOS;
+ break;
+ case IMAGE_OPTION_ROM_FCOE:
+ img_optype = OPTYPE_FCOE_BIOS;
+ break;
+ case IMAGE_FIRMWARE_BACKUP_ISCSI:
+ img_optype = OPTYPE_ISCSI_BACKUP;
+ break;
+ case IMAGE_NCSI:
+ img_optype = OPTYPE_NCSI_FW;
+ break;
+ case IMAGE_FLASHISM_JUMPVECTOR:
+ img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
+ break;
+ case IMAGE_FIRMWARE_PHY:
+ img_optype = OPTYPE_SH_PHY_FW;
+ break;
+ case IMAGE_REDBOOT_DIR:
+ img_optype = OPTYPE_REDBOOT_DIR;
+ break;
+ case IMAGE_REDBOOT_CONFIG:
+ img_optype = OPTYPE_REDBOOT_CONFIG;
+ break;
+ case IMAGE_UFI_DIR:
+ img_optype = OPTYPE_UFI_DIR;
+ break;
+ default:
+ break;
+ }
+
+ return img_optype;
+}
+
+static int be_flash_skyhawk(struct be_adapter *adapter,
+ const struct firmware *fw,
+ struct be_dma_mem *flash_cmd, int num_of_images)
+{
+ int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
+ bool crc_match, old_fw_img, flash_offset_support = true;
+ struct device *dev = &adapter->pdev->dev;
+ struct flash_section_info *fsec = NULL;
+ u32 img_offset, img_size, img_type;
+ u16 img_optype, flash_optype;
+ int status, i, filehdr_size;
+ const u8 *p;
+
+ filehdr_size = sizeof(struct flash_file_hdr_g3);
+ fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
+ if (!fsec) {
+ dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
+ return -EINVAL;
+ }
+
+retry_flash:
+ for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
+ img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
+ img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
+ img_type = le32_to_cpu(fsec->fsec_entry[i].type);
+ img_optype = be_get_img_optype(fsec->fsec_entry[i]);
+ old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
+
+ if (img_optype == 0xFFFF)
+ continue;
+
+ if (flash_offset_support)
+ flash_optype = OPTYPE_OFFSET_SPECIFIED;
+ else
+ flash_optype = img_optype;
+
+ /* Don't bother verifying CRC if an old FW image is being
+ * flashed
+ */
+ if (old_fw_img)
+ goto flash;
+
+ status = be_check_flash_crc(adapter, fw->data, img_offset,
+ img_size, filehdr_size +
+ img_hdrs_size, flash_optype,
+ &crc_match);
+ if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
+ base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
+ /* The current FW image on the card does not support
+ * OFFSET based flashing. Retry using older mechanism
+ * of OPTYPE based flashing
+ */
+ if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
+ flash_offset_support = false;
+ goto retry_flash;
+ }
+
+ /* The current FW image on the card does not recognize
+ * the new FLASH op_type. The FW download is partially
+ * complete. Reboot the server now to enable FW image
+ * to recognize the new FLASH op_type. To complete the
+ * remaining process, download the same FW again after
+ * the reboot.
+ */
+ dev_err(dev, "Flash incomplete. Reset the server\n");
+ dev_err(dev, "Download FW image again after reset\n");
+ return -EAGAIN;
+ } else if (status) {
+ dev_err(dev, "Could not get CRC for 0x%x region\n",
+ img_optype);
+ return -EFAULT;
+ }
+
+ if (crc_match)
+ continue;
+
+flash:
+ p = fw->data + filehdr_size + img_offset + img_hdrs_size;
+ if (p + img_size > fw->data + fw->size)
+ return -1;
+
+ status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
+ img_offset);
+
+ /* The current FW image on the card does not support OFFSET
+ * based flashing. Retry using older mechanism of OPTYPE based
+ * flashing
+ */
+ if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
+ flash_optype == OPTYPE_OFFSET_SPECIFIED) {
+ flash_offset_support = false;
+ goto retry_flash;
+ }
+
+ /* For old FW images ignore ILLEGAL_FIELD error or errors on
+ * UFI_DIR region
+ */
+ if (old_fw_img &&
+ (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
+ (img_optype == OPTYPE_UFI_DIR &&
+ base_status(status) == MCC_STATUS_FAILED))) {
+ continue;
+ } else if (status) {
+ dev_err(dev, "Flashing section type 0x%x failed\n",
+ img_type);
+
+ switch (addl_status(status)) {
+ case MCC_ADDL_STATUS_MISSING_SIGNATURE:
+ dev_err(dev,
+ "Digital signature missing in FW\n");
+ return -EINVAL;
+ case MCC_ADDL_STATUS_INVALID_SIGNATURE:
+ dev_err(dev,
+ "Invalid digital signature in FW\n");
+ return -EINVAL;
+ default:
+ return -EFAULT;
+ }
+ }
+ }
+ return 0;
+}
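
The retry_flash label implements a try-new-then-fall-back protocol: offset-based flashing is attempted first and, if the running firmware rejects it, the whole loop restarts using the legacy per-component optypes. A self-contained sketch of that control flow (flash_one() is a stand-in, not a real driver call):

#include <stdio.h>

static int flash_one(int i, int use_offset)
{
	/* Stand-in: pretend old firmware rejects offset-based flashing */
	return use_offset ? -1 : 0;
}

static int flash_all(int n_imgs)
{
	int use_offset = 1, i;

retry:
	for (i = 0; i < n_imgs; i++) {
		int rc = flash_one(i, use_offset);

		if (rc && use_offset) {
			use_offset = 0;
			goto retry;	/* fall back to legacy optypes */
		}
		if (rc)
			return rc;
	}
	return 0;
}

int main(void)
{
	printf("rc=%d\n", flash_all(3));	/* rc=0 after the fallback */
	return 0;
}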
+
+int lancer_fw_download(struct be_adapter *adapter,
+ const struct firmware *fw)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct be_dma_mem flash_cmd;
+ const u8 *data_ptr = NULL;
+ u8 *dest_image_ptr = NULL;
+ size_t image_size = 0;
+ u32 chunk_size = 0;
+ u32 data_written = 0;
+ u32 offset = 0;
+ int status = 0;
+ u8 add_status = 0;
+ u8 change_status;
+
+ if (!IS_ALIGNED(fw->size, sizeof(u32))) {
+ dev_err(dev, "FW image size should be multiple of 4\n");
+ return -EINVAL;
+ }
+
+ flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
+ + LANCER_FW_DOWNLOAD_CHUNK;
+ flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
+ &flash_cmd.dma, GFP_KERNEL);
+ if (!flash_cmd.va)
+ return -ENOMEM;
+
+ dest_image_ptr = flash_cmd.va +
+ sizeof(struct lancer_cmd_req_write_object);
+ image_size = fw->size;
+ data_ptr = fw->data;
+
+ while (image_size) {
+ chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
+
+ /* Copy the image chunk content. */
+ memcpy(dest_image_ptr, data_ptr, chunk_size);
+
+ status = lancer_cmd_write_object(adapter, &flash_cmd,
+ chunk_size, offset,
+ LANCER_FW_DOWNLOAD_LOCATION,
+ &data_written, &change_status,
+ &add_status);
+ if (status)
+ break;
+
+ offset += data_written;
+ data_ptr += data_written;
+ image_size -= data_written;
+ }
+
+ if (!status) {
+ /* Commit the FW written */
+ status = lancer_cmd_write_object(adapter, &flash_cmd,
+ 0, offset,
+ LANCER_FW_DOWNLOAD_LOCATION,
+ &data_written, &change_status,
+ &add_status);
+ }
+
+ dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
+ if (status) {
+ dev_err(dev, "Firmware load error\n");
+ return be_cmd_status(status);
+ }
+
+ dev_info(dev, "Firmware flashed successfully\n");
+
+ if (change_status == LANCER_FW_RESET_NEEDED) {
+ dev_info(dev, "Resetting adapter to activate new FW\n");
+ status = lancer_physdev_ctrl(adapter,
+ PHYSDEV_CONTROL_FW_RESET_MASK);
+ if (status) {
+ dev_err(dev, "Adapter busy, could not reset FW\n");
+ dev_err(dev, "Reboot server to activate new FW\n");
+ }
+ } else if (change_status != LANCER_NO_RESET_NEEDED) {
+ dev_info(dev, "Reboot server to activate new FW\n");
+ }
+
+ return 0;
+}
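
One subtlety in the loop above: the firmware reports how many bytes it actually consumed, so the offsets advance by data_written rather than by the requested chunk size, and a final zero-length write commits the image. A toy model of that loop (the 4KB per-call limit is invented):

#include <stdio.h>

#define CHUNK (32 * 1024)

static unsigned int write_object(unsigned int len)
{
	return len > 4096 ? 4096 : len;	/* stand-in: device takes 4KB max */
}

int main(void)
{
	unsigned int left = 10000, off = 0, w;

	while (left) {
		w = write_object(left < CHUNK ? left : CHUNK);
		off += w;
		left -= w;
	}
	printf("commit with zero-length write at offset %u\n", off);
	return 0;
}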
+
+/* Check if the flash image file is compatible with the adapter that
+ * is being flashed.
+ */
+static bool be_check_ufi_compatibility(struct be_adapter *adapter,
+ struct flash_file_hdr_g3 *fhdr)
+{
+ if (!fhdr) {
+ dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
+ return false;
+ }
+
+ /* First letter of the build version is used to identify
+ * which chip this image file is meant for.
+ */
+ switch (fhdr->build[0]) {
+ case BLD_STR_UFI_TYPE_SH:
+ if (!skyhawk_chip(adapter))
+ return false;
+ break;
+ case BLD_STR_UFI_TYPE_BE3:
+ if (!BE3_chip(adapter))
+ return false;
+ break;
+ case BLD_STR_UFI_TYPE_BE2:
+ if (!BE2_chip(adapter))
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ /* In BE3 FW images the "asic_type_rev" field doesn't track the
+ * asic_rev of the chips it is compatible with.
+ * When asic_type_rev is 0 the image is compatible only with
+ * pre-BE3-R chips (asic_rev < 0x10)
+ */
+ if (BEx_chip(adapter) && fhdr->asic_type_rev == 0)
+ return adapter->asic_rev < 0x10;
+ else
+ return (fhdr->asic_type_rev >= adapter->asic_rev);
+}
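
A compact restatement of the BE3 rule in the comment above; the sample revisions are arbitrary:

#include <stdio.h>

static int compatible(unsigned int img_rev, unsigned int asic_rev)
{
	if (img_rev == 0)			/* pre-BE3-R image */
		return asic_rev < 0x10;
	return img_rev >= asic_rev;		/* image at least as new */
}

int main(void)
{
	printf("%d %d\n", compatible(0, 0x0b), compatible(0, 0x10)); /* 1 0 */
	return 0;
}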
+
+int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct flash_file_hdr_g3 *fhdr3;
+ struct image_hdr *img_hdr_ptr;
+ int status = 0, i, num_imgs;
+ struct be_dma_mem flash_cmd;
+
+ fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
+ if (!be_check_ufi_compatibility(adapter, fhdr3)) {
+ dev_err(dev, "Flash image is not compatible with adapter\n");
+ return -EINVAL;
+ }
+
+ flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
+ flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+ GFP_KERNEL);
+ if (!flash_cmd.va)
+ return -ENOMEM;
+
+ num_imgs = le32_to_cpu(fhdr3->num_imgs);
+ for (i = 0; i < num_imgs; i++) {
+ img_hdr_ptr = (struct image_hdr *)(fw->data +
+ (sizeof(struct flash_file_hdr_g3) +
+ i * sizeof(struct image_hdr)));
+ if (!BE2_chip(adapter) &&
+ le32_to_cpu(img_hdr_ptr->imageid) != 1)
+ continue;
+
+ if (skyhawk_chip(adapter))
+ status = be_flash_skyhawk(adapter, fw, &flash_cmd,
+ num_imgs);
+ else
+ status = be_flash_BEx(adapter, fw, &flash_cmd,
+ num_imgs);
+ }
+
+ dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
+ if (!status)
+ dev_info(dev, "Firmware flashed successfully\n");
+
+ return status;
+}
+
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_dma_mem *nonemb_cmd)
{
@@ -2892,7 +3454,6 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
if (!status) {
attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
adapter->hba_port_num = attribs->hba_attribs.phy_port;
- adapter->pci_func_num = attribs->pci_func_num;
serial_num = attribs->hba_attribs.controller_serial_number;
for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
@@ -3575,14 +4136,16 @@ int be_cmd_query_port_name(struct be_adapter *adapter)
return status;
}
-/* Descriptor type */
-enum {
- FUNC_DESC = 1,
- VFT_DESC = 2
-};
-
+/* When more than 1 NIC descriptor is present in the descriptor list,
+ * the caller must specify the pf_num to obtain the NIC descriptor
+ * corresponding to its pci function.
+ * get_vft must be true when the caller wants the VF-template desc of the
+ * PF-pool.
+ * The pf_num should be set to PF_NUM_IGNORE when the caller knows
+ * that only its NIC descriptor is present in the descriptor list.
+ */
static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
- int desc_type)
+ bool get_vft, u8 pf_num)
{
struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
struct be_nic_res_desc *nic;
@@ -3592,40 +4155,42 @@ static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
nic = (struct be_nic_res_desc *)hdr;
- if (desc_type == FUNC_DESC ||
- (desc_type == VFT_DESC &&
- nic->flags & (1 << VFT_SHIFT)))
+
+ if ((pf_num == PF_NUM_IGNORE ||
+ nic->pf_num == pf_num) &&
+ (!get_vft || nic->flags & BIT(VFT_SHIFT)))
return nic;
}
-
hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
hdr = (void *)hdr + hdr->desc_len;
}
return NULL;
}
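
be_get_nic_desc() walks a packed, variable-length descriptor list: each header carries its own desc_len, and the cursor falls back to a default stride when that field is zero (the `hdr->desc_len ? : RESOURCE_DESC_SIZE_V0` above). A minimal sketch of the same walk over an invented buffer:

#include <stdio.h>
#include <stddef.h>

struct hdr { unsigned char type, len; };

int main(void)
{
	unsigned char buf[] = { 1, 4, 0, 0,  2, 0,  3, 4, 0, 0 };
	size_t off = 0;

	while (off + sizeof(struct hdr) <= sizeof(buf)) {
		struct hdr *h = (struct hdr *)&buf[off];
		unsigned int len = h->len ? h->len : 2;	/* default stride */

		printf("type %u at offset %zu\n", h->type, off);
		off += len;
	}
	return 0;
}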
-static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count)
+static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count,
+ u8 pf_num)
{
- return be_get_nic_desc(buf, desc_count, VFT_DESC);
+ return be_get_nic_desc(buf, desc_count, true, pf_num);
}
-static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count)
+static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count,
+ u8 pf_num)
{
- return be_get_nic_desc(buf, desc_count, FUNC_DESC);
+ return be_get_nic_desc(buf, desc_count, false, pf_num);
}
-static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
- u32 desc_count)
+static struct be_pcie_res_desc *be_get_pcie_desc(u8 *buf, u32 desc_count,
+ u8 pf_num)
{
struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
struct be_pcie_res_desc *pcie;
int i;
for (i = 0; i < desc_count; i++) {
- if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
- hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
- pcie = (struct be_pcie_res_desc *)hdr;
- if (pcie->pf_num == devfn)
+ if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
+ hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) {
+ pcie = (struct be_pcie_res_desc *)hdr;
+ if (pcie->pf_num == pf_num)
return pcie;
}
@@ -3710,13 +4275,23 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
u32 desc_count = le32_to_cpu(resp->desc_count);
struct be_nic_res_desc *desc;
- desc = be_get_func_nic_desc(resp->func_param, desc_count);
+ /* GET_FUNC_CONFIG returns resource descriptors of the
+ * current function only. So, pf_num should be set to
+ * PF_NUM_IGNORE.
+ */
+ desc = be_get_func_nic_desc(resp->func_param, desc_count,
+ PF_NUM_IGNORE);
if (!desc) {
status = -EINVAL;
goto err;
}
- adapter->pf_number = desc->pf_num;
- be_copy_nic_desc(res, desc);
+
+ /* Store pf_num & vf_num for later use in GET_PROFILE_CONFIG */
+ adapter->pf_num = desc->pf_num;
+ adapter->vf_num = desc->vf_num;
+
+ if (res)
+ be_copy_nic_desc(res, desc);
}
err:
mutex_unlock(&adapter->mbox_lock);
@@ -3726,10 +4301,7 @@ err:
return status;
}
-/* Will use MBOX only if MCCQ has not been created
- * non-zero domain => a PF is querying this on behalf of a VF
- * zero domain => a PF or a VF is querying this for itself
- */
+/* Will use MBOX only if MCCQ has not been created */
int be_cmd_get_profile_config(struct be_adapter *adapter,
struct be_resources *res, u8 query, u8 domain)
{
@@ -3759,12 +4331,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
if (!lancer_chip(adapter))
req->hdr.version = 1;
req->type = ACTIVE_PROFILE_TYPE;
- /* When a function is querying profile information relating to
- * itself hdr.pf_number must be set to it's pci_func_num + 1
- */
req->hdr.domain = domain;
- if (domain == 0)
- req->hdr.pf_num = adapter->pci_func_num + 1;
/* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
* descriptors with all bits set to "1" for the fields which can be
@@ -3780,8 +4347,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
resp = cmd.va;
desc_count = le16_to_cpu(resp->desc_count);
- pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
- desc_count);
+ pcie = be_get_pcie_desc(resp->func_param, desc_count,
+ adapter->pf_num);
if (pcie)
res->max_vfs = le16_to_cpu(pcie->num_vfs);
@@ -3789,11 +4356,13 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
if (port)
adapter->mc_type = port->mc_type;
- nic = be_get_func_nic_desc(resp->func_param, desc_count);
+ nic = be_get_func_nic_desc(resp->func_param, desc_count,
+ adapter->pf_num);
if (nic)
be_copy_nic_desc(res, nic);
- vf_res = be_get_vft_desc(resp->func_param, desc_count);
+ vf_res = be_get_vft_desc(resp->func_param, desc_count,
+ adapter->pf_num);
if (vf_res)
res->vf_if_cap_flags = vf_res->cap_flags;
err:
@@ -3883,7 +4452,7 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
return be_cmd_set_qos(adapter, max_rate / 10, domain);
be_reset_nic_desc(&nic_desc);
- nic_desc.pf_num = adapter->pf_number;
+ nic_desc.pf_num = adapter->pf_num;
nic_desc.vf_num = domain;
nic_desc.bw_min = 0;
if (lancer_chip(adapter)) {
@@ -4260,16 +4829,13 @@ err:
return status;
}
-int be_cmd_set_logical_link_config(struct be_adapter *adapter,
- int link_state, u8 domain)
+int __be_cmd_set_logical_link_config(struct be_adapter *adapter,
+ int link_state, int version, u8 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_set_ll_link *req;
int status;
- if (BEx_chip(adapter) || lancer_chip(adapter))
- return -EOPNOTSUPP;
-
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -4284,14 +4850,15 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter,
OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
sizeof(*req), wrb, NULL);
- req->hdr.version = 1;
+ req->hdr.version = version;
req->hdr.domain = domain;
- if (link_state == IFLA_VF_LINK_STATE_ENABLE)
- req->link_config |= 1;
+ if (link_state == IFLA_VF_LINK_STATE_ENABLE ||
+ link_state == IFLA_VF_LINK_STATE_AUTO)
+ req->link_config |= PLINK_ENABLE;
if (link_state == IFLA_VF_LINK_STATE_AUTO)
- req->link_config |= 1 << PLINK_TRACK_SHIFT;
+ req->link_config |= PLINK_TRACK;
status = be_mcc_notify_wait(adapter);
err:
@@ -4299,6 +4866,25 @@ err:
return status;
}
+int be_cmd_set_logical_link_config(struct be_adapter *adapter,
+ int link_state, u8 domain)
+{
+ int status;
+
+ if (BEx_chip(adapter))
+ return -EOPNOTSUPP;
+
+ status = __be_cmd_set_logical_link_config(adapter, link_state,
+ 2, domain);
+
+ /* Version 2 of the command will not be recognized by older FW.
+ * On such a failure, issue version 1 of the command.
+ */
+ if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST)
+ status = __be_cmd_set_logical_link_config(adapter, link_state,
+ 1, domain);
+ return status;
+}
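
The wrapper above is a simple version-negotiation pattern: issue v2 of the command and, if the firmware flags it as an illegal request, reissue it as v1. A sketch with a stubbed-out mailbox call:

#include <stdio.h>

#define ILLEGAL_REQUEST 1

static int send_cmd(int version)
{
	return version == 2 ? ILLEGAL_REQUEST : 0;	/* stand-in: old FW */
}

int main(void)
{
	int status = send_cmd(2);

	if (status == ILLEGAL_REQUEST)
		status = send_cmd(1);			/* fall back to v1 */
	printf("status=%d\n", status);
	return 0;
}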
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 91155ea..241819b 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -66,7 +66,9 @@ enum mcc_addl_status {
MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES = 0x16,
MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH = 0x4d,
MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a,
- MCC_ADDL_STATUS_INSUFFICIENT_VLANS = 0xab
+ MCC_ADDL_STATUS_INSUFFICIENT_VLANS = 0xab,
+ MCC_ADDL_STATUS_INVALID_SIGNATURE = 0x56,
+ MCC_ADDL_STATUS_MISSING_SIGNATURE = 0x57
};
#define CQE_BASE_STATUS_MASK 0xFFFF
@@ -289,9 +291,7 @@ struct be_cmd_req_hdr {
u32 timeout; /* dword 1 */
u32 request_length; /* dword 2 */
u8 version; /* dword 3 */
- u8 rsvd1; /* dword 3 */
- u8 pf_num; /* dword 3 */
- u8 rsvd2; /* dword 3 */
+ u8 rsvd[3]; /* dword 3 */
};
#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
@@ -1207,68 +1207,85 @@ struct be_cmd_resp_get_beacon_state {
/* Flashrom related descriptors */
#define MAX_FLASH_COMP 32
-#define OPTYPE_ISCSI_ACTIVE 0
-#define OPTYPE_REDBOOT 1
-#define OPTYPE_BIOS 2
-#define OPTYPE_PXE_BIOS 3
-#define OPTYPE_OFFSET_SPECIFIED 7
-#define OPTYPE_FCOE_BIOS 8
-#define OPTYPE_ISCSI_BACKUP 9
-#define OPTYPE_FCOE_FW_ACTIVE 10
-#define OPTYPE_FCOE_FW_BACKUP 11
-#define OPTYPE_NCSI_FW 13
-#define OPTYPE_REDBOOT_DIR 18
-#define OPTYPE_REDBOOT_CONFIG 19
-#define OPTYPE_SH_PHY_FW 21
-#define OPTYPE_FLASHISM_JUMPVECTOR 22
-#define OPTYPE_UFI_DIR 23
-#define OPTYPE_PHY_FW 99
-
-#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 262144 /* Max OPTION ROM image sz */
-#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 262144 /* Max Redboot image sz */
-#define FLASH_IMAGE_MAX_SIZE_g2 1310720 /* Max firmware image size */
-
-#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 262144
-#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 262144
-#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 524288 /* Max OPTION ROM image sz */
-#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 1048576 /* Max Redboot image sz */
-#define FLASH_IMAGE_MAX_SIZE_g3 2097152 /* Max firmware image size */
-
-/* Offsets for components on Flash. */
-#define FLASH_REDBOOT_START_g2 0
-#define FLASH_FCoE_BIOS_START_g2 524288
-#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 1048576
-#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 2359296
-#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 3670016
-#define FLASH_FCoE_BACKUP_IMAGE_START_g2 4980736
-#define FLASH_iSCSI_BIOS_START_g2 7340032
-#define FLASH_PXE_BIOS_START_g2 7864320
-
-#define FLASH_REDBOOT_START_g3 262144
-#define FLASH_PHY_FW_START_g3 1310720
-#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 2097152
-#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 4194304
-#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 6291456
-#define FLASH_FCoE_BACKUP_IMAGE_START_g3 8388608
-#define FLASH_iSCSI_BIOS_START_g3 12582912
-#define FLASH_PXE_BIOS_START_g3 13107200
-#define FLASH_FCoE_BIOS_START_g3 13631488
-#define FLASH_NCSI_START_g3 15990784
-
-#define IMAGE_NCSI 16
-#define IMAGE_OPTION_ROM_PXE 32
-#define IMAGE_OPTION_ROM_FCoE 33
-#define IMAGE_OPTION_ROM_ISCSI 34
-#define IMAGE_FLASHISM_JUMPVECTOR 48
-#define IMAGE_FIRMWARE_iSCSI 160
-#define IMAGE_FIRMWARE_FCoE 162
-#define IMAGE_FIRMWARE_BACKUP_iSCSI 176
-#define IMAGE_FIRMWARE_BACKUP_FCoE 178
-#define IMAGE_FIRMWARE_PHY 192
-#define IMAGE_REDBOOT_DIR 208
-#define IMAGE_REDBOOT_CONFIG 209
-#define IMAGE_UFI_DIR 210
-#define IMAGE_BOOT_CODE 224
+/* Optypes of each component in the UFI */
+enum {
+ OPTYPE_ISCSI_ACTIVE = 0,
+ OPTYPE_REDBOOT = 1,
+ OPTYPE_BIOS = 2,
+ OPTYPE_PXE_BIOS = 3,
+ OPTYPE_OFFSET_SPECIFIED = 7,
+ OPTYPE_FCOE_BIOS = 8,
+ OPTYPE_ISCSI_BACKUP = 9,
+ OPTYPE_FCOE_FW_ACTIVE = 10,
+ OPTYPE_FCOE_FW_BACKUP = 11,
+ OPTYPE_NCSI_FW = 13,
+ OPTYPE_REDBOOT_DIR = 18,
+ OPTYPE_REDBOOT_CONFIG = 19,
+ OPTYPE_SH_PHY_FW = 21,
+ OPTYPE_FLASHISM_JUMPVECTOR = 22,
+ OPTYPE_UFI_DIR = 23,
+ OPTYPE_PHY_FW = 99
+};
+
+/* Maximum sizes of components in BE2 FW UFI */
+enum {
+ BE2_BIOS_COMP_MAX_SIZE = 0x40000,
+ BE2_REDBOOT_COMP_MAX_SIZE = 0x40000,
+ BE2_COMP_MAX_SIZE = 0x140000
+};
+
+/* Maximum sizes of components in BE3 FW UFI */
+enum {
+ BE3_NCSI_COMP_MAX_SIZE = 0x40000,
+ BE3_PHY_FW_COMP_MAX_SIZE = 0x40000,
+ BE3_BIOS_COMP_MAX_SIZE = 0x80000,
+ BE3_REDBOOT_COMP_MAX_SIZE = 0x100000,
+ BE3_COMP_MAX_SIZE = 0x200000
+};
+
+/* Offsets for components in BE2 FW UFI */
+enum {
+ BE2_REDBOOT_START = 0x8000,
+ BE2_FCOE_BIOS_START = 0x80000,
+ BE2_ISCSI_PRIMARY_IMAGE_START = 0x100000,
+ BE2_ISCSI_BACKUP_IMAGE_START = 0x240000,
+ BE2_FCOE_PRIMARY_IMAGE_START = 0x380000,
+ BE2_FCOE_BACKUP_IMAGE_START = 0x4c0000,
+ BE2_ISCSI_BIOS_START = 0x700000,
+ BE2_PXE_BIOS_START = 0x780000
+};
+
+/* Offsets for components in BE3 FW UFI */
+enum {
+ BE3_REDBOOT_START = 0x40000,
+ BE3_PHY_FW_START = 0x140000,
+ BE3_ISCSI_PRIMARY_IMAGE_START = 0x200000,
+ BE3_ISCSI_BACKUP_IMAGE_START = 0x400000,
+ BE3_FCOE_PRIMARY_IMAGE_START = 0x600000,
+ BE3_FCOE_BACKUP_IMAGE_START = 0x800000,
+ BE3_ISCSI_BIOS_START = 0xc00000,
+ BE3_PXE_BIOS_START = 0xc80000,
+ BE3_FCOE_BIOS_START = 0xd00000,
+ BE3_NCSI_START = 0xf40000
+};
+
+/* Component entry types */
+enum {
+ IMAGE_NCSI = 0x10,
+ IMAGE_OPTION_ROM_PXE = 0x20,
+ IMAGE_OPTION_ROM_FCOE = 0x21,
+ IMAGE_OPTION_ROM_ISCSI = 0x22,
+ IMAGE_FLASHISM_JUMPVECTOR = 0x30,
+ IMAGE_FIRMWARE_ISCSI = 0xa0,
+ IMAGE_FIRMWARE_FCOE = 0xa2,
+ IMAGE_FIRMWARE_BACKUP_ISCSI = 0xb0,
+ IMAGE_FIRMWARE_BACKUP_FCOE = 0xb2,
+ IMAGE_FIRMWARE_PHY = 0xc0,
+ IMAGE_REDBOOT_DIR = 0xd0,
+ IMAGE_REDBOOT_CONFIG = 0xd1,
+ IMAGE_UFI_DIR = 0xd2,
+ IMAGE_BOOT_CODE = 0xe2
+};
struct controller_id {
u32 vendor;
@@ -1394,6 +1411,9 @@ struct be_cmd_read_flash_crc {
} __packed;
/**************** Lancer Firmware Flash ************/
+#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
+#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
+
struct amap_lancer_write_obj_context {
u8 write_length[24];
u8 reserved1[7];
@@ -1654,11 +1674,7 @@ struct mgmt_hba_attribs {
struct mgmt_controller_attrib {
struct mgmt_hba_attribs hba_attribs;
- u32 rsvd0[2];
- u16 rsvd1;
- u8 pci_func_num;
- u8 rsvd2;
- u32 rsvd3[7];
+ u32 rsvd0[10];
} __packed;
struct be_cmd_req_cntl_attribs {
@@ -2083,6 +2099,7 @@ struct be_port_res_desc {
#define NV_TYPE_VXLAN 3
#define SOCVID_SHIFT 2 /* Strip outer vlan */
#define RCVID_SHIFT 4 /* Report vlan */
+#define PF_NUM_IGNORE 255
u8 nv_flags;
u8 rsvd2;
__le16 nv_port; /* vxlan/gre port */
@@ -2246,7 +2263,8 @@ struct be_cmd_resp_get_iface_list {
};
/*************** Set logical link ********************/
-#define PLINK_TRACK_SHIFT 8
+#define PLINK_ENABLE BIT(0)
+#define PLINK_TRACK BIT(8)
struct be_cmd_req_set_ll_link {
struct be_cmd_req_hdr hdr;
u32 link_config; /* Bit 0: UP_DOWN, Bit 9: PLINK */
@@ -2321,19 +2339,11 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
u8 page_num, u8 *data);
int be_cmd_query_cable_type(struct be_adapter *adapter);
int be_cmd_query_sfp_info(struct be_adapter *adapter);
-int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 flash_oper, u32 flash_opcode, u32 img_offset,
- u32 buf_size);
-int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset,
- const char *obj_name, u32 *data_written,
- u8 *change_status, u8 *addn_status);
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 data_size, u32 data_offset, const char *obj_name,
u32 *data_read, u32 *eof, u8 *addn_status);
-int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name);
-int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
- u16 img_optype, u32 img_offset, u32 crc_offset);
+int lancer_fw_download(struct be_adapter *adapter, const struct firmware *fw);
+int be_fw_download(struct be_adapter *adapter, const struct firmware *fw);
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_dma_mem *nonemb_cmd);
int be_cmd_fw_init(struct be_adapter *adapter);
@@ -2355,9 +2365,9 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate,
void be_detect_error(struct be_adapter *adapter);
int be_cmd_get_die_temperature(struct be_adapter *adapter);
int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
+int be_cmd_get_fat_dump_len(struct be_adapter *adapter, u32 *dump_size);
+int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf);
int be_cmd_req_native_mode(struct be_adapter *adapter);
-int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
-int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
u32 domain);
int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index d2a5baf..a19ac44 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -250,6 +250,19 @@ static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
return data_read;
}
+static int be_get_dump_len(struct be_adapter *adapter)
+{
+ u32 dump_size = 0;
+
+ if (lancer_chip(adapter))
+ dump_size = lancer_cmd_get_file_len(adapter,
+ LANCER_FW_DUMP_FILE);
+ else
+ dump_size = adapter->fat_dump_len;
+
+ return dump_size;
+}
+
static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
u32 buf_len, void *buf)
{
@@ -291,37 +304,18 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
return status;
}
-static int be_get_reg_len(struct net_device *netdev)
+static int be_read_dump_data(struct be_adapter *adapter, u32 dump_len,
+ void *buf)
{
- struct be_adapter *adapter = netdev_priv(netdev);
- u32 log_size = 0;
-
- if (!check_privilege(adapter, MAX_PRIVILEGES))
- return 0;
-
- if (be_physfn(adapter)) {
- if (lancer_chip(adapter))
- log_size = lancer_cmd_get_file_len(adapter,
- LANCER_FW_DUMP_FILE);
- else
- be_cmd_get_reg_len(adapter, &log_size);
- }
- return log_size;
-}
+ int status = 0;
-static void
-be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
+ if (lancer_chip(adapter))
+ status = lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
+ dump_len, buf);
+ else
+ status = be_cmd_get_fat_dump(adapter, dump_len, buf);
- if (be_physfn(adapter)) {
- memset(buf, 0, regs->len);
- if (lancer_chip(adapter))
- lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
- regs->len, buf);
- else
- be_cmd_get_regs(adapter, regs->len, buf);
- }
+ return status;
}
static int be_get_coalesce(struct net_device *netdev,
@@ -914,6 +908,34 @@ static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
return be_load_fw(adapter, efl->data);
}
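+/* ethtool FW-dump hooks: these back the ETHTOOL_GET_DUMP_FLAG and
+ * ETHTOOL_GET_DUMP_DATA requests, i.e. "ethtool -w <iface>" to query the
+ * dump length and "ethtool -w <iface> data <file>" to retrieve the dump.
+ */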
+static int
+be_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (!check_privilege(adapter, MAX_PRIVILEGES))
+ return -EOPNOTSUPP;
+
+ dump->len = be_get_dump_len(adapter);
+ dump->version = 1;
+ dump->flag = 0x1; /* FW dump is enabled */
+ return 0;
+}
+
+static int
+be_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
+ void *buf)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status;
+
+ if (!check_privilege(adapter, MAX_PRIVILEGES))
+ return -EOPNOTSUPP;
+
+ status = be_read_dump_data(adapter, dump->len, buf);
+ return be_cmd_status(status);
+}
+
static int be_get_eeprom_len(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1311,8 +1333,6 @@ const struct ethtool_ops be_ethtool_ops = {
.set_msglevel = be_set_msg_level,
.get_sset_count = be_get_sset_count,
.get_ethtool_stats = be_get_ethtool_stats,
- .get_regs_len = be_get_reg_len,
- .get_regs = be_get_regs,
.flash_device = be_do_flash,
.self_test = be_self_test,
.get_rxnfc = be_get_rxnfc,
@@ -1321,6 +1341,8 @@ const struct ethtool_ops be_ethtool_ops = {
.get_rxfh_key_size = be_get_rxfh_key_size,
.get_rxfh = be_get_rxfh,
.set_rxfh = be_set_rxfh,
+ .get_dump_flag = be_get_dump_flag,
+ .get_dump_data = be_get_dump_data,
.get_channels = be_get_channels,
.set_channels = be_set_channels,
.get_module_info = be_get_module_info,
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 34e324f..00059af 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -729,7 +729,7 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
/* If vlan priority provided by OS is NOT in available bmap */
if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
- adapter->recommended_prio;
+ adapter->recommended_prio_bits;
return vlan_tag;
}
@@ -4204,10 +4204,17 @@ static int be_get_config(struct be_adapter *adapter)
int status, level;
u16 profile_id;
+ status = be_cmd_get_cntl_attributes(adapter);
+ if (status)
+ return status;
+
status = be_cmd_query_fw_cfg(adapter);
if (status)
return status;
+ if (!lancer_chip(adapter) && be_physfn(adapter))
+ be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);
+
if (BEx_chip(adapter)) {
level = be_cmd_get_fw_log_level(adapter);
adapter->msg_enable =
@@ -4402,10 +4409,14 @@ static int be_setup(struct be_adapter *adapter)
if (!lancer_chip(adapter))
be_cmd_req_native_mode(adapter);
- /* Need to invoke this cmd first to get the PCI Function Number */
- status = be_cmd_get_cntl_attributes(adapter);
- if (status)
- return status;
+ /* invoke this cmd first to get pf_num and vf_num which are needed
+ * for issuing profile related cmds
+ */
+ if (!BEx_chip(adapter)) {
+ status = be_cmd_get_func_config(adapter, NULL);
+ if (status)
+ return status;
+ }
if (!BE2_chip(adapter) && be_physfn(adapter))
be_alloc_sriov_res(adapter);
@@ -4490,570 +4501,6 @@ static void be_netpoll(struct net_device *netdev)
}
#endif
-static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
-
-static bool phy_flashing_required(struct be_adapter *adapter)
-{
- return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
- adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
-}
-
-static bool is_comp_in_ufi(struct be_adapter *adapter,
- struct flash_section_info *fsec, int type)
-{
- int i = 0, img_type = 0;
- struct flash_section_info_g2 *fsec_g2 = NULL;
-
- if (BE2_chip(adapter))
- fsec_g2 = (struct flash_section_info_g2 *)fsec;
-
- for (i = 0; i < MAX_FLASH_COMP; i++) {
- if (fsec_g2)
- img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
- else
- img_type = le32_to_cpu(fsec->fsec_entry[i].type);
-
- if (img_type == type)
- return true;
- }
- return false;
-
-}
-
-static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
- int header_size,
- const struct firmware *fw)
-{
- struct flash_section_info *fsec = NULL;
- const u8 *p = fw->data;
-
- p += header_size;
- while (p < (fw->data + fw->size)) {
- fsec = (struct flash_section_info *)p;
- if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
- return fsec;
- p += 32;
- }
- return NULL;
-}
-
-static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
- u32 img_offset, u32 img_size, int hdr_size,
- u16 img_optype, bool *crc_match)
-{
- u32 crc_offset;
- int status;
- u8 crc[4];
-
- status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
- img_size - 4);
- if (status)
- return status;
-
- crc_offset = hdr_size + img_offset + img_size - 4;
-
- /* Skip flashing, if crc of flashed region matches */
- if (!memcmp(crc, p + crc_offset, 4))
- *crc_match = true;
- else
- *crc_match = false;
-
- return status;
-}
-
-static int be_flash(struct be_adapter *adapter, const u8 *img,
- struct be_dma_mem *flash_cmd, int optype, int img_size,
- u32 img_offset)
-{
- u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
- struct be_cmd_write_flashrom *req = flash_cmd->va;
- int status;
-
- while (total_bytes) {
- num_bytes = min_t(u32, 32*1024, total_bytes);
-
- total_bytes -= num_bytes;
-
- if (!total_bytes) {
- if (optype == OPTYPE_PHY_FW)
- flash_op = FLASHROM_OPER_PHY_FLASH;
- else
- flash_op = FLASHROM_OPER_FLASH;
- } else {
- if (optype == OPTYPE_PHY_FW)
- flash_op = FLASHROM_OPER_PHY_SAVE;
- else
- flash_op = FLASHROM_OPER_SAVE;
- }
-
- memcpy(req->data_buf, img, num_bytes);
- img += num_bytes;
- status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
- flash_op, img_offset +
- bytes_sent, num_bytes);
- if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
- optype == OPTYPE_PHY_FW)
- break;
- else if (status)
- return status;
-
- bytes_sent += num_bytes;
- }
- return 0;
-}
-
-/* For BE2, BE3 and BE3-R */
-static int be_flash_BEx(struct be_adapter *adapter,
- const struct firmware *fw,
- struct be_dma_mem *flash_cmd, int num_of_images)
-{
- int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
- struct device *dev = &adapter->pdev->dev;
- struct flash_section_info *fsec = NULL;
- int status, i, filehdr_size, num_comp;
- const struct flash_comp *pflashcomp;
- bool crc_match;
- const u8 *p;
-
- struct flash_comp gen3_flash_types[] = {
- { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
- FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
- { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
- FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
- { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
- FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
- { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
- FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
- { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
- FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
- { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
- FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
- { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
- FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
- { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
- FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
- { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
- FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
- { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
- FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
- };
-
- struct flash_comp gen2_flash_types[] = {
- { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
- FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
- { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
- FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
- { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
- FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
- { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
- FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
- { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
- FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
- { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
- FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
- { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
- FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
- { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
- FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
- };
-
- if (BE3_chip(adapter)) {
- pflashcomp = gen3_flash_types;
- filehdr_size = sizeof(struct flash_file_hdr_g3);
- num_comp = ARRAY_SIZE(gen3_flash_types);
- } else {
- pflashcomp = gen2_flash_types;
- filehdr_size = sizeof(struct flash_file_hdr_g2);
- num_comp = ARRAY_SIZE(gen2_flash_types);
- img_hdrs_size = 0;
- }
-
- /* Get flash section info*/
- fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
- if (!fsec) {
- dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
- return -1;
- }
- for (i = 0; i < num_comp; i++) {
- if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
- continue;
-
- if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
- memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
- continue;
-
- if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
- !phy_flashing_required(adapter))
- continue;
-
- if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
- status = be_check_flash_crc(adapter, fw->data,
- pflashcomp[i].offset,
- pflashcomp[i].size,
- filehdr_size +
- img_hdrs_size,
- OPTYPE_REDBOOT, &crc_match);
- if (status) {
- dev_err(dev,
- "Could not get CRC for 0x%x region\n",
- pflashcomp[i].optype);
- continue;
- }
-
- if (crc_match)
- continue;
- }
-
- p = fw->data + filehdr_size + pflashcomp[i].offset +
- img_hdrs_size;
- if (p + pflashcomp[i].size > fw->data + fw->size)
- return -1;
-
- status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
- pflashcomp[i].size, 0);
- if (status) {
- dev_err(dev, "Flashing section type 0x%x failed\n",
- pflashcomp[i].img_type);
- return status;
- }
- }
- return 0;
-}
-
-static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
-{
- u32 img_type = le32_to_cpu(fsec_entry.type);
- u16 img_optype = le16_to_cpu(fsec_entry.optype);
-
- if (img_optype != 0xFFFF)
- return img_optype;
-
- switch (img_type) {
- case IMAGE_FIRMWARE_iSCSI:
- img_optype = OPTYPE_ISCSI_ACTIVE;
- break;
- case IMAGE_BOOT_CODE:
- img_optype = OPTYPE_REDBOOT;
- break;
- case IMAGE_OPTION_ROM_ISCSI:
- img_optype = OPTYPE_BIOS;
- break;
- case IMAGE_OPTION_ROM_PXE:
- img_optype = OPTYPE_PXE_BIOS;
- break;
- case IMAGE_OPTION_ROM_FCoE:
- img_optype = OPTYPE_FCOE_BIOS;
- break;
- case IMAGE_FIRMWARE_BACKUP_iSCSI:
- img_optype = OPTYPE_ISCSI_BACKUP;
- break;
- case IMAGE_NCSI:
- img_optype = OPTYPE_NCSI_FW;
- break;
- case IMAGE_FLASHISM_JUMPVECTOR:
- img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
- break;
- case IMAGE_FIRMWARE_PHY:
- img_optype = OPTYPE_SH_PHY_FW;
- break;
- case IMAGE_REDBOOT_DIR:
- img_optype = OPTYPE_REDBOOT_DIR;
- break;
- case IMAGE_REDBOOT_CONFIG:
- img_optype = OPTYPE_REDBOOT_CONFIG;
- break;
- case IMAGE_UFI_DIR:
- img_optype = OPTYPE_UFI_DIR;
- break;
- default:
- break;
- }
-
- return img_optype;
-}
-
-static int be_flash_skyhawk(struct be_adapter *adapter,
- const struct firmware *fw,
- struct be_dma_mem *flash_cmd, int num_of_images)
-{
- int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
- bool crc_match, old_fw_img, flash_offset_support = true;
- struct device *dev = &adapter->pdev->dev;
- struct flash_section_info *fsec = NULL;
- u32 img_offset, img_size, img_type;
- u16 img_optype, flash_optype;
- int status, i, filehdr_size;
- const u8 *p;
-
- filehdr_size = sizeof(struct flash_file_hdr_g3);
- fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
- if (!fsec) {
- dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
- return -EINVAL;
- }
-
-retry_flash:
- for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
- img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
- img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
- img_type = le32_to_cpu(fsec->fsec_entry[i].type);
- img_optype = be_get_img_optype(fsec->fsec_entry[i]);
- old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
-
- if (img_optype == 0xFFFF)
- continue;
-
- if (flash_offset_support)
- flash_optype = OPTYPE_OFFSET_SPECIFIED;
- else
- flash_optype = img_optype;
-
- /* Don't bother verifying CRC if an old FW image is being
- * flashed
- */
- if (old_fw_img)
- goto flash;
-
- status = be_check_flash_crc(adapter, fw->data, img_offset,
- img_size, filehdr_size +
- img_hdrs_size, flash_optype,
- &crc_match);
- if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
- base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
- /* The current FW image on the card does not support
- * OFFSET based flashing. Retry using older mechanism
- * of OPTYPE based flashing
- */
- if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
- flash_offset_support = false;
- goto retry_flash;
- }
-
- /* The current FW image on the card does not recognize
- * the new FLASH op_type. The FW download is partially
- * complete. Reboot the server now to enable FW image
- * to recognize the new FLASH op_type. To complete the
- * remaining process, download the same FW again after
- * the reboot.
- */
- dev_err(dev, "Flash incomplete. Reset the server\n");
- dev_err(dev, "Download FW image again after reset\n");
- return -EAGAIN;
- } else if (status) {
- dev_err(dev, "Could not get CRC for 0x%x region\n",
- img_optype);
- return -EFAULT;
- }
-
- if (crc_match)
- continue;
-
-flash:
- p = fw->data + filehdr_size + img_offset + img_hdrs_size;
- if (p + img_size > fw->data + fw->size)
- return -1;
-
- status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
- img_offset);
-
- /* The current FW image on the card does not support OFFSET
- * based flashing. Retry using older mechanism of OPTYPE based
- * flashing
- */
- if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
- flash_optype == OPTYPE_OFFSET_SPECIFIED) {
- flash_offset_support = false;
- goto retry_flash;
- }
-
- /* For old FW images ignore ILLEGAL_FIELD error or errors on
- * UFI_DIR region
- */
- if (old_fw_img &&
- (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
- (img_optype == OPTYPE_UFI_DIR &&
- base_status(status) == MCC_STATUS_FAILED))) {
- continue;
- } else if (status) {
- dev_err(dev, "Flashing section type 0x%x failed\n",
- img_type);
- return -EFAULT;
- }
- }
- return 0;
-}
-
-static int lancer_fw_download(struct be_adapter *adapter,
- const struct firmware *fw)
-{
-#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
-#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
- struct device *dev = &adapter->pdev->dev;
- struct be_dma_mem flash_cmd;
- const u8 *data_ptr = NULL;
- u8 *dest_image_ptr = NULL;
- size_t image_size = 0;
- u32 chunk_size = 0;
- u32 data_written = 0;
- u32 offset = 0;
- int status = 0;
- u8 add_status = 0;
- u8 change_status;
-
- if (!IS_ALIGNED(fw->size, sizeof(u32))) {
- dev_err(dev, "FW image size should be multiple of 4\n");
- return -EINVAL;
- }
-
- flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
- + LANCER_FW_DOWNLOAD_CHUNK;
- flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
- &flash_cmd.dma, GFP_KERNEL);
- if (!flash_cmd.va)
- return -ENOMEM;
-
- dest_image_ptr = flash_cmd.va +
- sizeof(struct lancer_cmd_req_write_object);
- image_size = fw->size;
- data_ptr = fw->data;
-
- while (image_size) {
- chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
-
- /* Copy the image chunk content. */
- memcpy(dest_image_ptr, data_ptr, chunk_size);
-
- status = lancer_cmd_write_object(adapter, &flash_cmd,
- chunk_size, offset,
- LANCER_FW_DOWNLOAD_LOCATION,
- &data_written, &change_status,
- &add_status);
- if (status)
- break;
-
- offset += data_written;
- data_ptr += data_written;
- image_size -= data_written;
- }
-
- if (!status) {
- /* Commit the FW written */
- status = lancer_cmd_write_object(adapter, &flash_cmd,
- 0, offset,
- LANCER_FW_DOWNLOAD_LOCATION,
- &data_written, &change_status,
- &add_status);
- }
-
- dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
- if (status) {
- dev_err(dev, "Firmware load error\n");
- return be_cmd_status(status);
- }
-
- dev_info(dev, "Firmware flashed successfully\n");
-
- if (change_status == LANCER_FW_RESET_NEEDED) {
- dev_info(dev, "Resetting adapter to activate new FW\n");
- status = lancer_physdev_ctrl(adapter,
- PHYSDEV_CONTROL_FW_RESET_MASK);
- if (status) {
- dev_err(dev, "Adapter busy, could not reset FW\n");
- dev_err(dev, "Reboot server to activate new FW\n");
- }
- } else if (change_status != LANCER_NO_RESET_NEEDED) {
- dev_info(dev, "Reboot server to activate new FW\n");
- }
-
- return 0;
-}
-
-/* Check if the flash image file is compatible with the adapter that
- * is being flashed.
- */
-static bool be_check_ufi_compatibility(struct be_adapter *adapter,
- struct flash_file_hdr_g3 *fhdr)
-{
- if (!fhdr) {
- dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
- return false;
- }
-
- /* First letter of the build version is used to identify
- * which chip this image file is meant for.
- */
- switch (fhdr->build[0]) {
- case BLD_STR_UFI_TYPE_SH:
- if (!skyhawk_chip(adapter))
- return false;
- break;
- case BLD_STR_UFI_TYPE_BE3:
- if (!BE3_chip(adapter))
- return false;
- break;
- case BLD_STR_UFI_TYPE_BE2:
- if (!BE2_chip(adapter))
- return false;
- break;
- default:
- return false;
- }
-
- /* In BE3 FW images the "asic_type_rev" field doesn't track the
- * asic_rev of the chips it is compatible with.
- * When asic_type_rev is 0 the image is compatible only with
- * pre-BE3-R chips (asic_rev < 0x10)
- */
- if (BEx_chip(adapter) && fhdr->asic_type_rev == 0)
- return adapter->asic_rev < 0x10;
- else
- return (fhdr->asic_type_rev >= adapter->asic_rev);
-}
-
-static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
-{
- struct device *dev = &adapter->pdev->dev;
- struct flash_file_hdr_g3 *fhdr3;
- struct image_hdr *img_hdr_ptr;
- int status = 0, i, num_imgs;
- struct be_dma_mem flash_cmd;
-
- fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
- if (!be_check_ufi_compatibility(adapter, fhdr3)) {
- dev_err(dev, "Flash image is not compatible with adapter\n");
- return -EINVAL;
- }
-
- flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
- flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
- GFP_KERNEL);
- if (!flash_cmd.va)
- return -ENOMEM;
-
- num_imgs = le32_to_cpu(fhdr3->num_imgs);
- for (i = 0; i < num_imgs; i++) {
- img_hdr_ptr = (struct image_hdr *)(fw->data +
- (sizeof(struct flash_file_hdr_g3) +
- i * sizeof(struct image_hdr)));
- if (!BE2_chip(adapter) &&
- le32_to_cpu(img_hdr_ptr->imageid) != 1)
- continue;
-
- if (skyhawk_chip(adapter))
- status = be_flash_skyhawk(adapter, fw, &flash_cmd,
- num_imgs);
- else
- status = be_flash_BEx(adapter, fw, &flash_cmd,
- num_imgs);
- }
-
- dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
- if (!status)
- dev_info(dev, "Firmware flashed successfully\n");
-
- return status;
-}
-
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
const struct firmware *fw;
@@ -5108,6 +4555,9 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
return -EINVAL;
mode = nla_get_u16(attr);
+ if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
+ return -EOPNOTSUPP;
+
if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
return -EINVAL;
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index bee32a9..d1ca45f 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -54,6 +54,7 @@ config FEC_MPC52xx_MDIO
If compiled as module, it will be called fec_mpc52xx_phy.
source "drivers/net/ethernet/freescale/fs_enet/Kconfig"
+source "drivers/net/ethernet/freescale/fman/Kconfig"
config FSL_PQ_MDIO
tristate "Freescale PQ MDIO"
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 71debd1..4097c58 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -17,3 +17,5 @@ gianfar_driver-objs := gianfar.o \
gianfar_ethtool.o
obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
+
+obj-$(CONFIG_FSL_FMAN) += fman/
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
new file mode 100644
index 0000000..66b7296
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -0,0 +1,8 @@
+config FSL_FMAN
+ bool "FMan support"
+ depends on FSL_SOC || COMPILE_TEST
+ select GENERIC_ALLOCATOR
+ default n
+ help
+ Freescale Data-Path Acceleration Architecture Frame Manager
+ (FMan) support
diff --git a/drivers/net/ethernet/freescale/fman/Makefile b/drivers/net/ethernet/freescale/fman/Makefile
new file mode 100644
index 0000000..51fd2e6
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/Makefile
@@ -0,0 +1,7 @@
+subdir-ccflags-y += -I$(srctree)/drivers/net/ethernet/freescale/fman
+
+obj-y += fsl_fman.o fsl_fman_mac.o fsl_mac.o
+
+fsl_fman-objs := fman_muram.o fman.o fman_sp.o fman_port.o
+fsl_fman_mac-objs := fman_dtsec.o fman_memac.o fman_tgec.o
+fsl_mac-objs += mac.o
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
new file mode 100644
index 0000000..623aa1c
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -0,0 +1,2871 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "fman.h"
+#include "fman_muram.h"
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/libfdt_env.h>
+
+/* General defines */
+#define FMAN_LIODN_TBL 64 /* size of LIODN table */
+#define MAX_NUM_OF_MACS 10
+#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
+#define BASE_RX_PORTID 0x08
+#define BASE_TX_PORTID 0x28
+
+/* Modules registers offsets */
+#define BMI_OFFSET 0x00080000
+#define QMI_OFFSET 0x00080400
+#define DMA_OFFSET 0x000C2000
+#define FPM_OFFSET 0x000C3000
+#define IMEM_OFFSET 0x000C4000
+#define CGP_OFFSET 0x000DB000
+
+/* Exceptions bit map */
+#define EX_DMA_BUS_ERROR 0x80000000
+#define EX_DMA_READ_ECC 0x40000000
+#define EX_DMA_SYSTEM_WRITE_ECC 0x20000000
+#define EX_DMA_FM_WRITE_ECC 0x10000000
+#define EX_FPM_STALL_ON_TASKS 0x08000000
+#define EX_FPM_SINGLE_ECC 0x04000000
+#define EX_FPM_DOUBLE_ECC 0x02000000
+#define EX_QMI_SINGLE_ECC 0x01000000
+#define EX_QMI_DEQ_FROM_UNKNOWN_PORTID 0x00800000
+#define EX_QMI_DOUBLE_ECC 0x00400000
+#define EX_BMI_LIST_RAM_ECC 0x00200000
+#define EX_BMI_STORAGE_PROFILE_ECC 0x00100000
+#define EX_BMI_STATISTICS_RAM_ECC 0x00080000
+#define EX_IRAM_ECC 0x00040000
+#define EX_MURAM_ECC 0x00020000
+#define EX_BMI_DISPATCH_RAM_ECC 0x00010000
+#define EX_DMA_SINGLE_PORT_ECC 0x00008000
+
+/* DMA defines */
+/* masks */
+#define DMA_MODE_BER 0x00200000
+#define DMA_MODE_ECC 0x00000020
+#define DMA_MODE_SECURE_PROT 0x00000800
+#define DMA_MODE_AXI_DBG_MASK 0x0F000000
+
+#define DMA_TRANSFER_PORTID_MASK 0xFF000000
+#define DMA_TRANSFER_TNUM_MASK 0x00FF0000
+#define DMA_TRANSFER_LIODN_MASK 0x00000FFF
+
+#define DMA_STATUS_BUS_ERR 0x08000000
+#define DMA_STATUS_READ_ECC 0x04000000
+#define DMA_STATUS_SYSTEM_WRITE_ECC 0x02000000
+#define DMA_STATUS_FM_WRITE_ECC 0x01000000
+#define DMA_STATUS_FM_SPDAT_ECC 0x00080000
+
+#define DMA_MODE_CACHE_OR_SHIFT 30
+#define DMA_MODE_AXI_DBG_SHIFT 24
+#define DMA_MODE_CEN_SHIFT 13
+#define DMA_MODE_CEN_MASK 0x00000007
+#define DMA_MODE_DBG_SHIFT 7
+#define DMA_MODE_AID_MODE_SHIFT 4
+
+#define DMA_THRESH_COMMQ_SHIFT 24
+#define DMA_THRESH_READ_INT_BUF_SHIFT 16
+#define DMA_THRESH_READ_INT_BUF_MASK 0x0000003f
+#define DMA_THRESH_WRITE_INT_BUF_MASK 0x0000003f
+
+#define DMA_TRANSFER_PORTID_SHIFT 24
+#define DMA_TRANSFER_TNUM_SHIFT 16
+
+#define DMA_CAM_SIZEOF_ENTRY 0x40
+#define DMA_CAM_UNITS 8
+
+#define DMA_LIODN_SHIFT 16
+#define DMA_LIODN_BASE_MASK 0x00000FFF
+
+/* FPM defines */
+#define FPM_EV_MASK_DOUBLE_ECC 0x80000000
+#define FPM_EV_MASK_STALL 0x40000000
+#define FPM_EV_MASK_SINGLE_ECC 0x20000000
+#define FPM_EV_MASK_RELEASE_FM 0x00010000
+#define FPM_EV_MASK_DOUBLE_ECC_EN 0x00008000
+#define FPM_EV_MASK_STALL_EN 0x00004000
+#define FPM_EV_MASK_SINGLE_ECC_EN 0x00002000
+#define FPM_EV_MASK_EXTERNAL_HALT 0x00000008
+#define FPM_EV_MASK_ECC_ERR_HALT 0x00000004
+
+#define FPM_RAM_MURAM_ECC 0x00008000
+#define FPM_RAM_IRAM_ECC 0x00004000
+#define FPM_IRAM_ECC_ERR_EX_EN 0x00020000
+#define FPM_MURAM_ECC_ERR_EX_EN 0x00040000
+#define FPM_RAM_IRAM_ECC_EN 0x40000000
+#define FPM_RAM_RAMS_ECC_EN 0x80000000
+#define FPM_RAM_RAMS_ECC_EN_SRC_SEL 0x08000000
+
+#define FPM_REV1_MAJOR_MASK 0x0000FF00
+#define FPM_REV1_MINOR_MASK 0x000000FF
+
+#define FPM_DISP_LIMIT_SHIFT 24
+
+#define FPM_PRT_FM_CTL1 0x00000001
+#define FPM_PRT_FM_CTL2 0x00000002
+#define FPM_PORT_FM_CTL_PORTID_SHIFT 24
+#define FPM_PRC_ORA_FM_CTL_SEL_SHIFT 16
+
+#define FPM_THR1_PRS_SHIFT 24
+#define FPM_THR1_KG_SHIFT 16
+#define FPM_THR1_PLCR_SHIFT 8
+#define FPM_THR1_BMI_SHIFT 0
+
+#define FPM_THR2_QMI_ENQ_SHIFT 24
+#define FPM_THR2_QMI_DEQ_SHIFT 0
+#define FPM_THR2_FM_CTL1_SHIFT 16
+#define FPM_THR2_FM_CTL2_SHIFT 8
+
+#define FPM_EV_MASK_CAT_ERR_SHIFT 1
+#define FPM_EV_MASK_DMA_ERR_SHIFT 0
+
+#define FPM_REV1_MAJOR_SHIFT 8
+
+#define FPM_RSTC_FM_RESET 0x80000000
+#define FPM_RSTC_MAC0_RESET 0x40000000
+#define FPM_RSTC_MAC1_RESET 0x20000000
+#define FPM_RSTC_MAC2_RESET 0x10000000
+#define FPM_RSTC_MAC3_RESET 0x08000000
+#define FPM_RSTC_MAC8_RESET 0x04000000
+#define FPM_RSTC_MAC4_RESET 0x02000000
+#define FPM_RSTC_MAC5_RESET 0x01000000
+#define FPM_RSTC_MAC6_RESET 0x00800000
+#define FPM_RSTC_MAC7_RESET 0x00400000
+#define FPM_RSTC_MAC9_RESET 0x00200000
+
+#define FPM_TS_INT_SHIFT 16
+#define FPM_TS_CTL_EN 0x80000000
+
+/* BMI defines */
+#define BMI_INIT_START 0x80000000
+#define BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC 0x80000000
+#define BMI_ERR_INTR_EN_LIST_RAM_ECC 0x40000000
+#define BMI_ERR_INTR_EN_STATISTICS_RAM_ECC 0x20000000
+#define BMI_ERR_INTR_EN_DISPATCH_RAM_ECC 0x10000000
+#define BMI_NUM_OF_TASKS_MASK 0x3F000000
+#define BMI_NUM_OF_EXTRA_TASKS_MASK 0x000F0000
+#define BMI_NUM_OF_DMAS_MASK 0x00000F00
+#define BMI_NUM_OF_EXTRA_DMAS_MASK 0x0000000F
+#define BMI_FIFO_SIZE_MASK 0x000003FF
+#define BMI_EXTRA_FIFO_SIZE_MASK 0x03FF0000
+#define BMI_CFG2_DMAS_MASK 0x0000003F
+#define BMI_CFG2_TASKS_MASK 0x0000003F
+
+#define BMI_CFG2_TASKS_SHIFT 16
+#define BMI_CFG2_DMAS_SHIFT 0
+#define BMI_CFG1_FIFO_SIZE_SHIFT 16
+#define BMI_NUM_OF_TASKS_SHIFT 24
+#define BMI_EXTRA_NUM_OF_TASKS_SHIFT 16
+#define BMI_NUM_OF_DMAS_SHIFT 8
+#define BMI_EXTRA_NUM_OF_DMAS_SHIFT 0
+
+#define BMI_FIFO_ALIGN 0x100
+
+#define BMI_EXTRA_FIFO_SIZE_SHIFT 16
+
+/* QMI defines */
+#define QMI_CFG_ENQ_EN 0x80000000
+#define QMI_CFG_DEQ_EN 0x40000000
+#define QMI_CFG_EN_COUNTERS 0x10000000
+#define QMI_CFG_DEQ_MASK 0x0000003F
+#define QMI_CFG_ENQ_MASK 0x00003F00
+#define QMI_CFG_ENQ_SHIFT 8
+
+#define QMI_ERR_INTR_EN_DOUBLE_ECC 0x80000000
+#define QMI_ERR_INTR_EN_DEQ_FROM_DEF 0x40000000
+#define QMI_INTR_EN_SINGLE_ECC 0x80000000
+
+#define QMI_GS_HALT_NOT_BUSY 0x00000002
+
+/* IRAM defines */
+#define IRAM_IADD_AIE 0x80000000
+#define IRAM_READY 0x80000000
+
+/* Default values */
+#define DEFAULT_CATASTROPHIC_ERR 0
+#define DEFAULT_DMA_ERR 0
+#define DEFAULT_AID_MODE FMAN_DMA_AID_OUT_TNUM
+#define DEFAULT_DMA_COMM_Q_LOW 0x2A
+#define DEFAULT_DMA_COMM_Q_HIGH 0x3F
+#define DEFAULT_CACHE_OVERRIDE 0
+#define DEFAULT_DMA_CAM_NUM_OF_ENTRIES 64
+#define DEFAULT_DMA_DBG_CNT_MODE 0
+#define DEFAULT_DMA_SOS_EMERGENCY 0
+#define DEFAULT_DMA_WATCHDOG 0
+#define DEFAULT_DISP_LIMIT 0
+#define DEFAULT_PRS_DISP_TH 16
+#define DEFAULT_PLCR_DISP_TH 16
+#define DEFAULT_KG_DISP_TH 16
+#define DEFAULT_BMI_DISP_TH 16
+#define DEFAULT_QMI_ENQ_DISP_TH 16
+#define DEFAULT_QMI_DEQ_DISP_TH 16
+#define DEFAULT_FM_CTL1_DISP_TH 16
+#define DEFAULT_FM_CTL2_DISP_TH 16
+
+#define DFLT_AXI_DBG_NUM_OF_BEATS 1
+
+#define DFLT_DMA_READ_INT_BUF_LOW(dma_thresh_max_buf) \
+ ((dma_thresh_max_buf + 1) / 2)
+#define DFLT_DMA_READ_INT_BUF_HIGH(dma_thresh_max_buf) \
+ ((dma_thresh_max_buf + 1) * 3 / 4)
+#define DFLT_DMA_WRITE_INT_BUF_LOW(dma_thresh_max_buf) \
+ ((dma_thresh_max_buf + 1) / 2)
+#define DFLT_DMA_WRITE_INT_BUF_HIGH(dma_thresh_max_buf)\
+ ((dma_thresh_max_buf + 1) * 3 / 4)
+
+#define DMA_COMM_Q_LOW_FMAN_V3 0x2A
+#define DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq) \
+ ((dma_thresh_max_commq + 1) / 2)
+#define DFLT_DMA_COMM_Q_LOW(major, dma_thresh_max_commq) \
+ ((major == 6) ? DMA_COMM_Q_LOW_FMAN_V3 : \
+ DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq))
+
+#define DMA_COMM_Q_HIGH_FMAN_V3 0x3f
+#define DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq) \
+ ((dma_thresh_max_commq + 1) * 3 / 4)
+#define DFLT_DMA_COMM_Q_HIGH(major, dma_thresh_max_commq) \
+ ((major == 6) ? DMA_COMM_Q_HIGH_FMAN_V3 : \
+ DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq))
+
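+/* Worked example: for dma_thresh_max_buf = 127 the buffer macros above
+ * yield LOW = (127 + 1) / 2 = 64 and HIGH = (127 + 1) * 3 / 4 = 96. The
+ * HIGH values program the assert-emergency threshold register and the
+ * LOW values the clear-emergency hysteresis register (see dma_init()).
+ */
+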
+#define TOTAL_NUM_OF_TASKS_FMAN_V3L 59
+#define TOTAL_NUM_OF_TASKS_FMAN_V3H 124
+#define DFLT_TOTAL_NUM_OF_TASKS(major, minor, bmi_max_num_of_tasks) \
+ ((major == 6) ? ((minor == 1 || minor == 4) ? \
+ TOTAL_NUM_OF_TASKS_FMAN_V3L : TOTAL_NUM_OF_TASKS_FMAN_V3H) : \
+ bmi_max_num_of_tasks)
+
+#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 64
+#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V2 32
+#define DFLT_DMA_CAM_NUM_OF_ENTRIES(major) \
+ (major == 6 ? DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 : \
+ DMA_CAM_NUM_OF_ENTRIES_FMAN_V2)
+
+#define FM_TIMESTAMP_1_USEC_BIT 8
+
+/* Defines used for enabling/disabling FMan interrupts */
+#define ERR_INTR_EN_DMA 0x00010000
+#define ERR_INTR_EN_FPM 0x80000000
+#define ERR_INTR_EN_BMI 0x00800000
+#define ERR_INTR_EN_QMI 0x00400000
+#define ERR_INTR_EN_MURAM 0x00040000
+#define ERR_INTR_EN_MAC0 0x00004000
+#define ERR_INTR_EN_MAC1 0x00002000
+#define ERR_INTR_EN_MAC2 0x00001000
+#define ERR_INTR_EN_MAC3 0x00000800
+#define ERR_INTR_EN_MAC4 0x00000400
+#define ERR_INTR_EN_MAC5 0x00000200
+#define ERR_INTR_EN_MAC6 0x00000100
+#define ERR_INTR_EN_MAC7 0x00000080
+#define ERR_INTR_EN_MAC8 0x00008000
+#define ERR_INTR_EN_MAC9 0x00000040
+
+#define INTR_EN_QMI 0x40000000
+#define INTR_EN_MAC0 0x00080000
+#define INTR_EN_MAC1 0x00040000
+#define INTR_EN_MAC2 0x00020000
+#define INTR_EN_MAC3 0x00010000
+#define INTR_EN_MAC4 0x00000040
+#define INTR_EN_MAC5 0x00000020
+#define INTR_EN_MAC6 0x00000008
+#define INTR_EN_MAC7 0x00000002
+#define INTR_EN_MAC8 0x00200000
+#define INTR_EN_MAC9 0x00100000
+#define INTR_EN_REV0 0x00008000
+#define INTR_EN_REV1 0x00004000
+#define INTR_EN_REV2 0x00002000
+#define INTR_EN_REV3 0x00001000
+#define INTR_EN_TMR 0x01000000
+
+enum fman_dma_aid_mode {
+ FMAN_DMA_AID_OUT_PORT_ID = 0, /* 4 LSB of PORT_ID */
+ FMAN_DMA_AID_OUT_TNUM /* 4 LSB of TNUM */
+};
+
+struct fman_iram_regs {
+ u32 iadd; /* FM IRAM instruction address register */
+ u32 idata; /* FM IRAM instruction data register */
+ u32 itcfg; /* FM IRAM timing config register */
+ u32 iready; /* FM IRAM ready register */
+};
+
+struct fman_fpm_regs {
+ u32 fmfp_tnc; /* FPM TNUM Control 0x00 */
+ u32 fmfp_prc; /* FPM Port_ID FmCtl Association 0x04 */
+ u32 fmfp_brkc; /* FPM Breakpoint Control 0x08 */
+ u32 fmfp_mxd; /* FPM Flush Control 0x0c */
+ u32 fmfp_dist1; /* FPM Dispatch Thresholds1 0x10 */
+ u32 fmfp_dist2; /* FPM Dispatch Thresholds2 0x14 */
+ u32 fm_epi; /* FM Error Pending Interrupts 0x18 */
+ u32 fm_rie; /* FM Error Interrupt Enable 0x1c */
+ u32 fmfp_fcev[4]; /* FPM FMan-Controller Event 1-4 0x20-0x2f */
+ u32 res0030[4]; /* res 0x30 - 0x3f */
+ u32 fmfp_cee[4]; /* FPM FMan-Controller Event 1-4 0x40-0x4f */
+ u32 res0050[4]; /* res 0x50-0x5f */
+ u32 fmfp_tsc1; /* FPM TimeStamp Control1 0x60 */
+ u32 fmfp_tsc2; /* FPM TimeStamp Control2 0x64 */
+ u32 fmfp_tsp; /* FPM Time Stamp 0x68 */
+ u32 fmfp_tsf; /* FPM Time Stamp Fraction 0x6c */
+ u32 fm_rcr; /* FM Rams Control 0x70 */
+ u32 fmfp_extc; /* FPM External Requests Control 0x74 */
+ u32 fmfp_ext1; /* FPM External Requests Config1 0x78 */
+ u32 fmfp_ext2; /* FPM External Requests Config2 0x7c */
+ u32 fmfp_drd[16]; /* FPM Data_Ram Data 0-15 0x80 - 0xbf */
+ u32 fmfp_dra; /* FPM Data Ram Access 0xc0 */
+ u32 fm_ip_rev_1; /* FM IP Block Revision 1 0xc4 */
+ u32 fm_ip_rev_2; /* FM IP Block Revision 2 0xc8 */
+ u32 fm_rstc; /* FM Reset Command 0xcc */
+ u32 fm_cld; /* FM Classifier Debug 0xd0 */
+ u32 fm_npi; /* FM Normal Pending Interrupts 0xd4 */
+ u32 fmfp_exte; /* FPM External Requests Enable 0xd8 */
+ u32 fmfp_ee; /* FPM Event&Mask 0xdc */
+ u32 fmfp_cev[4]; /* FPM CPU Event 1-4 0xe0-0xef */
+ u32 res00f0[4]; /* res 0xf0-0xff */
+ u32 fmfp_ps[50]; /* FPM Port Status 0x100-0x1c7 */
+ u32 res01c8[14]; /* res 0x1c8-0x1ff */
+ u32 fmfp_clfabc; /* FPM CLFABC 0x200 */
+ u32 fmfp_clfcc; /* FPM CLFCC 0x204 */
+ u32 fmfp_clfaval; /* FPM CLFAVAL 0x208 */
+ u32 fmfp_clfbval; /* FPM CLFBVAL 0x20c */
+ u32 fmfp_clfcval; /* FPM CLFCVAL 0x210 */
+ u32 fmfp_clfamsk; /* FPM CLFAMSK 0x214 */
+ u32 fmfp_clfbmsk; /* FPM CLFBMSK 0x218 */
+ u32 fmfp_clfcmsk; /* FPM CLFCMSK 0x21c */
+ u32 fmfp_clfamc; /* FPM CLFAMC 0x220 */
+ u32 fmfp_clfbmc; /* FPM CLFBMC 0x224 */
+ u32 fmfp_clfcmc; /* FPM CLFCMC 0x228 */
+ u32 fmfp_decceh; /* FPM DECCEH 0x22c */
+ u32 res0230[116]; /* res 0x230 - 0x3ff */
+ u32 fmfp_ts[128]; /* FPM Task Status 0x400 - 0x5ff */
+ u32 res0600[0x400 - 384]; /* res 0x600 - 0xfff */
+};
+
+struct fman_bmi_regs {
+ u32 fmbm_init; /* BMI Initialization 0x00 */
+ u32 fmbm_cfg1; /* BMI Configuration 1 0x04 */
+ u32 fmbm_cfg2; /* BMI Configuration 2 0x08 */
+ u32 res000c[5]; /* 0x0c - 0x1f */
+ u32 fmbm_ievr; /* Interrupt Event Register 0x20 */
+ u32 fmbm_ier; /* Interrupt Enable Register 0x24 */
+ u32 fmbm_ifr; /* Interrupt Force Register 0x28 */
+ u32 res002c[5]; /* 0x2c - 0x3f */
+ u32 fmbm_arb[8]; /* BMI Arbitration 0x40 - 0x5f */
+ u32 res0060[12]; /* 0x60 - 0x8f */
+ u32 fmbm_dtc[3]; /* Debug Trap Counter 0x90 - 0x9b */
+ u32 res009c; /* 0x9c */
+ u32 fmbm_dcv[3][4]; /* Debug Compare val 0xa0-0xcf */
+ u32 fmbm_dcm[3][4]; /* Debug Compare Mask 0xd0-0xff */
+ u32 fmbm_gde; /* BMI Global Debug Enable 0x100 */
+ u32 fmbm_pp[63]; /* BMI Port Parameters 0x104 - 0x1ff */
+ u32 res0200; /* 0x200 */
+ u32 fmbm_pfs[63]; /* BMI Port FIFO Size 0x204 - 0x2ff */
+ u32 res0300; /* 0x300 */
+ u32 fmbm_spliodn[63]; /* Port Partition ID 0x304 - 0x3ff */
+};
+
+struct fman_qmi_regs {
+ u32 fmqm_gc; /* General Configuration Register 0x00 */
+ u32 res0004; /* 0x04 */
+ u32 fmqm_eie; /* Error Interrupt Event Register 0x08 */
+ u32 fmqm_eien; /* Error Interrupt Enable Register 0x0c */
+ u32 fmqm_eif; /* Error Interrupt Force Register 0x10 */
+ u32 fmqm_ie; /* Interrupt Event Register 0x14 */
+ u32 fmqm_ien; /* Interrupt Enable Register 0x18 */
+ u32 fmqm_if; /* Interrupt Force Register 0x1c */
+ u32 fmqm_gs; /* Global Status Register 0x20 */
+ u32 fmqm_ts; /* Task Status Register 0x24 */
+ u32 fmqm_etfc; /* Enqueue Total Frame Counter 0x28 */
+ u32 fmqm_dtfc; /* Dequeue Total Frame Counter 0x2c */
+ u32 fmqm_dc0; /* Dequeue Counter 0 0x30 */
+ u32 fmqm_dc1; /* Dequeue Counter 1 0x34 */
+ u32 fmqm_dc2; /* Dequeue Counter 2 0x38 */
+ u32 fmqm_dc3; /* Dequeue Counter 3 0x3c */
+ u32 fmqm_dfdc; /* Dequeue FQID from Default Counter 0x40 */
+ u32 fmqm_dfcc; /* Dequeue FQID from Context Counter 0x44 */
+ u32 fmqm_dffc; /* Dequeue FQID from FD Counter 0x48 */
+ u32 fmqm_dcc; /* Dequeue Confirm Counter 0x4c */
+ u32 res0050[7]; /* 0x50 - 0x6b */
+ u32 fmqm_tapc; /* Tnum Aging Period Control 0x6c */
+ u32 fmqm_dmcvc; /* Dequeue MAC Command Valid Counter 0x70 */
+ u32 fmqm_difdcc; /* Dequeue Invalid FD Command Counter 0x74 */
+ u32 fmqm_da1v; /* Dequeue A1 Valid Counter 0x78 */
+ u32 res007c; /* 0x7c */
+ u32 fmqm_dtc; /* Debug Trap Counter 0x80 */
+ u32 fmqm_efddd; /* Enqueue Frame Desc Dynamic Debug 0x84 */
+ u32 res0088[2]; /* 0x88 - 0x8f */
+ struct {
+ u32 fmqm_dtcfg1; /* Debug Trap Config 1 Register 0x00 */
+ u32 fmqm_dtval1; /* Debug Trap Value 1 Register 0x04 */
+ u32 fmqm_dtm1; /* Debug Trap Mask 1 Register 0x08 */
+ u32 fmqm_dtc1; /* Debug Trap Counter 1 Register 0x0c */
+ u32 fmqm_dtcfg2; /* dbg Trap cfg 2 Register 0x10 */
+ u32 fmqm_dtval2; /* Debug Trap Value 2 Register 0x14 */
+ u32 fmqm_dtm2; /* Debug Trap Mask 2 Register 0x18 */
+ u32 res001c; /* 0x1c */
+ } dbg_traps[3]; /* 0x90 - 0xef */
+ u8 res00f0[0x400 - 0xf0]; /* 0xf0 - 0x3ff */
+};
+
+struct fman_dma_regs {
+ u32 fmdmsr; /* FM DMA status register 0x00 */
+ u32 fmdmmr; /* FM DMA mode register 0x04 */
+ u32 fmdmtr; /* FM DMA bus threshold register 0x08 */
+ u32 fmdmhy; /* FM DMA bus hysteresis register 0x0c */
+ u32 fmdmsetr; /* FM DMA SOS emergency Threshold Register 0x10 */
+ u32 fmdmtah; /* FM DMA transfer bus address high reg 0x14 */
+ u32 fmdmtal; /* FM DMA transfer bus address low reg 0x18 */
+ u32 fmdmtcid; /* FM DMA transfer bus communication ID reg 0x1c */
+ u32 fmdmra; /* FM DMA bus internal ram address register 0x20 */
+ u32 fmdmrd; /* FM DMA bus internal ram data register 0x24 */
+ u32 fmdmwcr; /* FM DMA CAM watchdog counter value 0x28 */
+ u32 fmdmebcr; /* FM DMA CAM base in MURAM register 0x2c */
+ u32 fmdmccqdr; /* FM DMA CAM and CMD Queue Debug reg 0x30 */
+ u32 fmdmccqvr1; /* FM DMA CAM and CMD Queue Value reg #1 0x34 */
+ u32 fmdmccqvr2; /* FM DMA CAM and CMD Queue Value reg #2 0x38 */
+ u32 fmdmcqvr3; /* FM DMA CMD Queue Value register #3 0x3c */
+ u32 fmdmcqvr4; /* FM DMA CMD Queue Value register #4 0x40 */
+ u32 fmdmcqvr5; /* FM DMA CMD Queue Value register #5 0x44 */
+ u32 fmdmsefrc; /* FM DMA Semaphore Entry Full Reject Cntr 0x48 */
+ u32 fmdmsqfrc; /* FM DMA Semaphore Queue Full Reject Cntr 0x4c */
+ u32 fmdmssrc; /* FM DMA Semaphore SYNC Reject Counter 0x50 */
+ u32 fmdmdcr; /* FM DMA Debug Counter 0x54 */
+ u32 fmdmemsr; /* FM DMA Emergency Smoother Register 0x58 */
+ u32 res005c; /* 0x5c */
+ u32 fmdmplr[FMAN_LIODN_TBL / 2]; /* DMA LIODN regs 0x60-0xdf */
+ u32 res00e0[0x400 - 56]; /* res 0xe0 - 0xfff */
+};
+
+/* Structure that holds current FMan state.
+ * Used for saving run time information.
+ */
+struct fman_state_struct {
+ u8 fm_id;
+ u16 fm_clk_freq;
+ struct fman_rev_info rev_info;
+ bool enabled_time_stamp;
+ u8 count1_micro_bit;
+ u8 total_num_of_tasks;
+ u8 accumulated_num_of_tasks;
+ u32 accumulated_fifo_size;
+ u8 accumulated_num_of_open_dmas;
+ u8 accumulated_num_of_deq_tnums;
+ u32 exceptions;
+ u32 extra_fifo_pool_size;
+ u8 extra_tasks_pool_size;
+ u8 extra_open_dmas_pool_size;
+ u16 port_mfl[MAX_NUM_OF_MACS];
+ u16 mac_mfl[MAX_NUM_OF_MACS];
+
+ /* SOC specific */
+ u32 fm_iram_size;
+ /* DMA */
+ u32 dma_thresh_max_commq;
+ u32 dma_thresh_max_buf;
+ u32 max_num_of_open_dmas;
+ /* QMI */
+ u32 qmi_max_num_of_tnums;
+ u32 qmi_def_tnums_thresh;
+ /* BMI */
+ u32 bmi_max_num_of_tasks;
+ u32 bmi_max_fifo_size;
+ /* General */
+ u32 fm_port_num_of_cg;
+ u32 num_of_rx_ports;
+ u32 total_fifo_size;
+
+ u32 qman_channel_base;
+ u32 num_of_qman_channels;
+
+ struct resource *res;
+};
+
+/* Structure that holds FMan initial configuration */
+struct fman_cfg {
+ u8 disp_limit_tsh;
+ u8 prs_disp_tsh;
+ u8 plcr_disp_tsh;
+ u8 kg_disp_tsh;
+ u8 bmi_disp_tsh;
+ u8 qmi_enq_disp_tsh;
+ u8 qmi_deq_disp_tsh;
+ u8 fm_ctl1_disp_tsh;
+ u8 fm_ctl2_disp_tsh;
+ int dma_cache_override;
+ enum fman_dma_aid_mode dma_aid_mode;
+ u32 dma_axi_dbg_num_of_beats;
+ u32 dma_cam_num_of_entries;
+ u32 dma_watchdog;
+ u8 dma_comm_qtsh_asrt_emer;
+ u32 dma_write_buf_tsh_asrt_emer;
+ u32 dma_read_buf_tsh_asrt_emer;
+ u8 dma_comm_qtsh_clr_emer;
+ u32 dma_write_buf_tsh_clr_emer;
+ u32 dma_read_buf_tsh_clr_emer;
+ u32 dma_sos_emergency;
+ int dma_dbg_cnt_mode;
+ int catastrophic_err;
+ int dma_err;
+ u32 exceptions;
+ u16 clk_freq;
+ u32 cam_base_addr;
+ u32 fifo_base_addr;
+ u32 total_fifo_size;
+ u32 total_num_of_tasks;
+ u32 qmi_def_tnums_thresh;
+};
+
+/* Structure that holds information received from device tree */
+struct fman_dts_params {
+ void __iomem *base_addr; /* FMan virtual address */
+ struct resource *res; /* FMan memory resource */
+ u8 id; /* FMan ID */
+
+ int err_irq; /* FMan Error IRQ */
+
+ u16 clk_freq; /* FMan clock freq (in MHz) */
+
+ u32 qman_channel_base; /* QMan channels base */
+ u32 num_of_qman_channels; /* Number of QMan channels */
+
+ struct resource muram_res; /* MURAM resource */
+};
+
+/** fman_exceptions_cb
+ * fman - Pointer to FMan
+ * exception - The exception.
+ *
+ * Exceptions user callback routine; called upon an exception, passing
+ * the exception identification.
+ *
+ * Return: IRQ status
+ */
+typedef irqreturn_t (fman_exceptions_cb)(struct fman *fman,
+ enum fman_exceptions exception);
+
+/** fman_bus_error_cb
+ * fman - Pointer to FMan
+ * port_id - Port id
+ * addr - Address that caused the error
+ * tnum - Owner of error
+ * liodn - Logical IO device number
+ *
+ * Bus error user callback routine; called upon a bus error, passing
+ * parameters that describe the error and its owner.
+ *
+ * Return: IRQ status
+ */
+typedef irqreturn_t (fman_bus_error_cb)(struct fman *fman, u8 port_id,
+ u64 addr, u8 tnum, u16 liodn);
+
+struct fman {
+ struct device *dev;
+ void __iomem *base_addr;
+ struct fman_intr_src intr_mng[FMAN_EV_CNT];
+
+ struct fman_fpm_regs __iomem *fpm_regs;
+ struct fman_bmi_regs __iomem *bmi_regs;
+ struct fman_qmi_regs __iomem *qmi_regs;
+ struct fman_dma_regs __iomem *dma_regs;
+ fman_exceptions_cb *exception_cb;
+ fman_bus_error_cb *bus_error_cb;
+ /* Spinlock for FMan use */
+ spinlock_t spinlock;
+ struct fman_state_struct *state;
+
+ struct fman_cfg *cfg;
+ struct muram_info *muram;
+ /* cam section in muram */
+ int cam_offset;
+ size_t cam_size;
+ /* Fifo in MURAM */
+ int fifo_offset;
+ size_t fifo_size;
+
+ u32 liodn_base[64];
+ u32 liodn_offset[64];
+
+ struct fman_dts_params dts_params;
+};
+
+static irqreturn_t fman_exceptions(struct fman *fman,
+ enum fman_exceptions exception)
+{
+ dev_dbg(fman->dev, "%s: FMan[%d] exception %d\n",
+ __func__, fman->state->fm_id, exception);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fman_bus_error(struct fman *fman, u8 __maybe_unused port_id,
+ u64 __maybe_unused addr,
+ u8 __maybe_unused tnum,
+ u16 __maybe_unused liodn)
+{
+ dev_dbg(fman->dev, "%s: FMan[%d] bus error: port_id[%d]\n",
+ __func__, fman->state->fm_id, port_id);
+
+ return IRQ_HANDLED;
+}
+
+static inline irqreturn_t call_mac_isr(struct fman *fman, u8 id)
+{
+ if (fman->intr_mng[id].isr_cb) {
+ fman->intr_mng[id].isr_cb(fman->intr_mng[id].src_handle);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
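+/* Map a hardware port ID to a zero-based software port ID. With
+ * BASE_RX_PORTID = 0x08 and BASE_TX_PORTID = 0x28, e.g. hardware port
+ * 0x0a maps to Rx port 2 and hardware port 0x2a maps to Tx port 2.
+ */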
+static inline u8 hw_port_id_to_sw_port_id(u8 major, u8 hw_port_id)
+{
+ u8 sw_port_id = 0;
+
+ if (hw_port_id >= BASE_TX_PORTID)
+ sw_port_id = hw_port_id - BASE_TX_PORTID;
+ else if (hw_port_id >= BASE_RX_PORTID)
+ sw_port_id = hw_port_id - BASE_RX_PORTID;
+ else
+ sw_port_id = 0;
+
+ return sw_port_id;
+}
+
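+/* Select which FMan controller performs order restoration for a port:
+ * ownership alternates by port parity, odd port IDs going to FM_CTL1
+ * and even ones to FM_CTL2.
+ */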
+static void set_port_order_restoration(struct fman_fpm_regs __iomem *fpm_rg,
+ u8 port_id)
+{
+ u32 tmp = 0;
+
+ tmp = port_id << FPM_PORT_FM_CTL_PORTID_SHIFT;
+
+ tmp |= FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1;
+
+ /* order restoration */
+ if (port_id % 2)
+ tmp |= FPM_PRT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;
+ else
+ tmp |= FPM_PRT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;
+
+ iowrite32be(tmp, &fpm_rg->fmfp_prc);
+}
+
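+/* Each fmdmplr register packs the 12-bit LIODN base for two adjacent
+ * ports: even port IDs occupy bits 16-27 (DMA_LIODN_SHIFT) and odd ones
+ * bits 0-11.
+ */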
+static void set_port_liodn(struct fman *fman, u8 port_id,
+ u32 liodn_base, u32 liodn_ofst)
+{
+ u32 tmp;
+
+ /* set LIODN base for this port */
+ tmp = ioread32be(&fman->dma_regs->fmdmplr[port_id / 2]);
+ if (port_id % 2) {
+ tmp &= ~DMA_LIODN_BASE_MASK;
+ tmp |= liodn_base;
+ } else {
+ tmp &= ~(DMA_LIODN_BASE_MASK << DMA_LIODN_SHIFT);
+ tmp |= liodn_base << DMA_LIODN_SHIFT;
+ }
+ iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]);
+ iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
+}
+
+static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
+{
+ u32 tmp;
+
+ tmp = ioread32be(&fpm_rg->fm_rcr);
+ if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
+ iowrite32be(tmp | FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
+ else
+ iowrite32be(tmp | FPM_RAM_RAMS_ECC_EN |
+ FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
+}
+
+static void disable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
+{
+ u32 tmp;
+
+ tmp = ioread32be(&fpm_rg->fm_rcr);
+ if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
+ iowrite32be(tmp & ~FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
+ else
+ iowrite32be(tmp & ~(FPM_RAM_RAMS_ECC_EN | FPM_RAM_IRAM_ECC_EN),
+ &fpm_rg->fm_rcr);
+}
+
+static void fman_defconfig(struct fman_cfg *cfg)
+{
+ memset(cfg, 0, sizeof(struct fman_cfg));
+
+ cfg->catastrophic_err = DEFAULT_CATASTROPHIC_ERR;
+ cfg->dma_err = DEFAULT_DMA_ERR;
+ cfg->dma_aid_mode = DEFAULT_AID_MODE;
+ cfg->dma_comm_qtsh_clr_emer = DEFAULT_DMA_COMM_Q_LOW;
+ cfg->dma_comm_qtsh_asrt_emer = DEFAULT_DMA_COMM_Q_HIGH;
+ cfg->dma_cache_override = DEFAULT_CACHE_OVERRIDE;
+ cfg->dma_cam_num_of_entries = DEFAULT_DMA_CAM_NUM_OF_ENTRIES;
+ cfg->dma_dbg_cnt_mode = DEFAULT_DMA_DBG_CNT_MODE;
+ cfg->dma_sos_emergency = DEFAULT_DMA_SOS_EMERGENCY;
+ cfg->dma_watchdog = DEFAULT_DMA_WATCHDOG;
+ cfg->disp_limit_tsh = DEFAULT_DISP_LIMIT;
+ cfg->prs_disp_tsh = DEFAULT_PRS_DISP_TH;
+ cfg->plcr_disp_tsh = DEFAULT_PLCR_DISP_TH;
+ cfg->kg_disp_tsh = DEFAULT_KG_DISP_TH;
+ cfg->bmi_disp_tsh = DEFAULT_BMI_DISP_TH;
+ cfg->qmi_enq_disp_tsh = DEFAULT_QMI_ENQ_DISP_TH;
+ cfg->qmi_deq_disp_tsh = DEFAULT_QMI_DEQ_DISP_TH;
+ cfg->fm_ctl1_disp_tsh = DEFAULT_FM_CTL1_DISP_TH;
+ cfg->fm_ctl2_disp_tsh = DEFAULT_FM_CTL2_DISP_TH;
+}
+
+static int dma_init(struct fman *fman)
+{
+ struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
+ struct fman_cfg *cfg = fman->cfg;
+ u32 tmp_reg;
+
+ /* Init DMA Registers */
+
+ /* clear status reg events */
+ tmp_reg = (DMA_STATUS_BUS_ERR | DMA_STATUS_READ_ECC |
+ DMA_STATUS_SYSTEM_WRITE_ECC | DMA_STATUS_FM_WRITE_ECC);
+ iowrite32be(ioread32be(&dma_rg->fmdmsr) | tmp_reg, &dma_rg->fmdmsr);
+
+ /* configure mode register */
+ tmp_reg = 0;
+ tmp_reg |= cfg->dma_cache_override << DMA_MODE_CACHE_OR_SHIFT;
+ if (cfg->exceptions & EX_DMA_BUS_ERROR)
+ tmp_reg |= DMA_MODE_BER;
+ if ((cfg->exceptions & EX_DMA_SYSTEM_WRITE_ECC) |
+ (cfg->exceptions & EX_DMA_READ_ECC) |
+ (cfg->exceptions & EX_DMA_FM_WRITE_ECC))
+ tmp_reg |= DMA_MODE_ECC;
+ if (cfg->dma_axi_dbg_num_of_beats)
+ tmp_reg |= (DMA_MODE_AXI_DBG_MASK &
+ ((cfg->dma_axi_dbg_num_of_beats - 1)
+ << DMA_MODE_AXI_DBG_SHIFT));
+
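+ /* the CAM-entries (CEN) field counts in units of DMA_CAM_UNITS (8),
+ * minus one; e.g. the default 64 entries encode as 64 / 8 - 1 = 7
+ */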
+ tmp_reg |= (((cfg->dma_cam_num_of_entries / DMA_CAM_UNITS) - 1) &
+ DMA_MODE_CEN_MASK) << DMA_MODE_CEN_SHIFT;
+ tmp_reg |= DMA_MODE_SECURE_PROT;
+ tmp_reg |= cfg->dma_dbg_cnt_mode << DMA_MODE_DBG_SHIFT;
+ tmp_reg |= cfg->dma_aid_mode << DMA_MODE_AID_MODE_SHIFT;
+
+ iowrite32be(tmp_reg, &dma_rg->fmdmmr);
+
+ /* configure thresholds register */
+ tmp_reg = ((u32)cfg->dma_comm_qtsh_asrt_emer <<
+ DMA_THRESH_COMMQ_SHIFT);
+ tmp_reg |= (cfg->dma_read_buf_tsh_asrt_emer &
+ DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT;
+ tmp_reg |= cfg->dma_write_buf_tsh_asrt_emer &
+ DMA_THRESH_WRITE_INT_BUF_MASK;
+
+ iowrite32be(tmp_reg, &dma_rg->fmdmtr);
+
+ /* configure hysteresis register */
+ tmp_reg = ((u32)cfg->dma_comm_qtsh_clr_emer <<
+ DMA_THRESH_COMMQ_SHIFT);
+ tmp_reg |= (cfg->dma_read_buf_tsh_clr_emer &
+ DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT;
+ tmp_reg |= cfg->dma_write_buf_tsh_clr_emer &
+ DMA_THRESH_WRITE_INT_BUF_MASK;
+
+ iowrite32be(tmp_reg, &dma_rg->fmdmhy);
+
+ /* configure emergency threshold */
+ iowrite32be(cfg->dma_sos_emergency, &dma_rg->fmdmsetr);
+
+ /* configure Watchdog */
+ iowrite32be((cfg->dma_watchdog * cfg->clk_freq), &dma_rg->fmdmwcr);
+
+ iowrite32be(cfg->cam_base_addr, &dma_rg->fmdmebcr);
+
+ /* Allocate MURAM for CAM */
+ fman->cam_size =
+ (u32)(fman->cfg->dma_cam_num_of_entries * DMA_CAM_SIZEOF_ENTRY);
+ fman->cam_offset = fman_muram_alloc(fman->muram, fman->cam_size);
+ if (IS_ERR_VALUE(fman->cam_offset)) {
+ dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ if (fman->state->rev_info.major == 2) {
+ u32 __iomem *cam_base_addr;
+
+ fman_muram_free_mem(fman->muram, fman->cam_offset,
+ fman->cam_size);
+
+ fman->cam_size = fman->cfg->dma_cam_num_of_entries * 72 + 128;
+ fman->cam_offset = fman_muram_alloc(fman->muram,
+ fman->cam_size);
+ if (IS_ERR_VALUE(fman->cam_offset)) {
+ dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ if (fman->cfg->dma_cam_num_of_entries % 8 ||
+ fman->cfg->dma_cam_num_of_entries > 32) {
+ dev_err(fman->dev, "%s: wrong dma_cam_num_of_entries\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ cam_base_addr = (u32 __iomem *)
+ fman_muram_offset_to_vbase(fman->muram,
+ fman->cam_offset);
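+ /* build a mask with one set bit per valid CAM entry, left-justified:
+ * e.g. 32 entries -> 0xffffffff, 16 entries -> 0xffff0000
+ */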
+ iowrite32be(~((1 <<
+ (32 - fman->cfg->dma_cam_num_of_entries)) - 1),
+ cam_base_addr);
+ }
+
+ fman->cfg->cam_base_addr = fman->cam_offset;
+
+ return 0;
+}
+
+static void fpm_init(struct fman_fpm_regs __iomem *fpm_rg, struct fman_cfg *cfg)
+{
+ u32 tmp_reg;
+ int i;
+
+ /* Init FPM Registers */
+
+ tmp_reg = (u32)(cfg->disp_limit_tsh << FPM_DISP_LIMIT_SHIFT);
+ iowrite32be(tmp_reg, &fpm_rg->fmfp_mxd);
+
+ tmp_reg = (((u32)cfg->prs_disp_tsh << FPM_THR1_PRS_SHIFT) |
+ ((u32)cfg->kg_disp_tsh << FPM_THR1_KG_SHIFT) |
+ ((u32)cfg->plcr_disp_tsh << FPM_THR1_PLCR_SHIFT) |
+ ((u32)cfg->bmi_disp_tsh << FPM_THR1_BMI_SHIFT));
+ iowrite32be(tmp_reg, &fpm_rg->fmfp_dist1);
+
+ tmp_reg =
+ (((u32)cfg->qmi_enq_disp_tsh << FPM_THR2_QMI_ENQ_SHIFT) |
+ ((u32)cfg->qmi_deq_disp_tsh << FPM_THR2_QMI_DEQ_SHIFT) |
+ ((u32)cfg->fm_ctl1_disp_tsh << FPM_THR2_FM_CTL1_SHIFT) |
+ ((u32)cfg->fm_ctl2_disp_tsh << FPM_THR2_FM_CTL2_SHIFT));
+ iowrite32be(tmp_reg, &fpm_rg->fmfp_dist2);
+
+ /* define exceptions and error behavior */
+ tmp_reg = 0;
+ /* Clear events */
+ tmp_reg |= (FPM_EV_MASK_STALL | FPM_EV_MASK_DOUBLE_ECC |
+ FPM_EV_MASK_SINGLE_ECC);
+ /* enable interrupts */
+ if (cfg->exceptions & EX_FPM_STALL_ON_TASKS)
+ tmp_reg |= FPM_EV_MASK_STALL_EN;
+ if (cfg->exceptions & EX_FPM_SINGLE_ECC)
+ tmp_reg |= FPM_EV_MASK_SINGLE_ECC_EN;
+ if (cfg->exceptions & EX_FPM_DOUBLE_ECC)
+ tmp_reg |= FPM_EV_MASK_DOUBLE_ECC_EN;
+ tmp_reg |= (cfg->catastrophic_err << FPM_EV_MASK_CAT_ERR_SHIFT);
+ tmp_reg |= (cfg->dma_err << FPM_EV_MASK_DMA_ERR_SHIFT);
+ /* FMan is not halted upon external halt activation */
+ tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT;
+ /* FMan is not halted upon unrecoverable ECC error */
+ tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT;
+ iowrite32be(tmp_reg, &fpm_rg->fmfp_ee);
+
+ /* clear all fmCtls event registers */
+ for (i = 0; i < FM_NUM_OF_FMAN_CTRL_EVENT_REGS; i++)
+ iowrite32be(0xFFFFFFFF, &fpm_rg->fmfp_cev[i]);
+
+ /* RAM ECC - enable and clear events */
+ /* first we need to clear all parser memory,
+ * as it is uninitialized and may cause ECC errors
+ */
+ /* event bits */
+ tmp_reg = (FPM_RAM_MURAM_ECC | FPM_RAM_IRAM_ECC);
+
+ iowrite32be(tmp_reg, &fpm_rg->fm_rcr);
+
+ tmp_reg = 0;
+ if (cfg->exceptions & EX_IRAM_ECC) {
+ tmp_reg |= FPM_IRAM_ECC_ERR_EX_EN;
+ enable_rams_ecc(fpm_rg);
+ }
+ if (cfg->exceptions & EX_MURAM_ECC) {
+ tmp_reg |= FPM_MURAM_ECC_ERR_EX_EN;
+ enable_rams_ecc(fpm_rg);
+ }
+ iowrite32be(tmp_reg, &fpm_rg->fm_rie);
+}
+
+static void bmi_init(struct fman_bmi_regs __iomem *bmi_rg,
+ struct fman_cfg *cfg)
+{
+ u32 tmp_reg;
+
+ /* Init BMI Registers */
+
+ /* define common resources */
+ tmp_reg = cfg->fifo_base_addr;
+ tmp_reg = tmp_reg / BMI_FIFO_ALIGN;
+
+ tmp_reg |= ((cfg->total_fifo_size / FMAN_BMI_FIFO_UNITS - 1) <<
+ BMI_CFG1_FIFO_SIZE_SHIFT);
+ iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg1);
+
+ tmp_reg = ((cfg->total_num_of_tasks - 1) & BMI_CFG2_TASKS_MASK) <<
+ BMI_CFG2_TASKS_SHIFT;
+ /* num of DMA's will be dynamically updated when each port is set */
+ iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg2);
+
+ /* define unmaskable exceptions, enable and clear events */
+ tmp_reg = 0;
+ iowrite32be(BMI_ERR_INTR_EN_LIST_RAM_ECC |
+ BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC |
+ BMI_ERR_INTR_EN_STATISTICS_RAM_ECC |
+ BMI_ERR_INTR_EN_DISPATCH_RAM_ECC, &bmi_rg->fmbm_ievr);
+
+ if (cfg->exceptions & EX_BMI_LIST_RAM_ECC)
+ tmp_reg |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
+ if (cfg->exceptions & EX_BMI_STORAGE_PROFILE_ECC)
+ tmp_reg |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
+ if (cfg->exceptions & EX_BMI_STATISTICS_RAM_ECC)
+ tmp_reg |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
+ if (cfg->exceptions & EX_BMI_DISPATCH_RAM_ECC)
+ tmp_reg |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
+ iowrite32be(tmp_reg, &bmi_rg->fmbm_ier);
+}
+
+static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
+ struct fman_cfg *cfg)
+{
+ u32 tmp_reg;
+
+ /* Init QMI Registers */
+
+ /* Clear error interrupt events */
+
+ iowrite32be(QMI_ERR_INTR_EN_DOUBLE_ECC | QMI_ERR_INTR_EN_DEQ_FROM_DEF,
+ &qmi_rg->fmqm_eie);
+ tmp_reg = 0;
+ if (cfg->exceptions & EX_QMI_DEQ_FROM_UNKNOWN_PORTID)
+ tmp_reg |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
+ if (cfg->exceptions & EX_QMI_DOUBLE_ECC)
+ tmp_reg |= QMI_ERR_INTR_EN_DOUBLE_ECC;
+ /* enable events */
+ iowrite32be(tmp_reg, &qmi_rg->fmqm_eien);
+
+ tmp_reg = 0;
+ /* Clear interrupt events */
+ iowrite32be(QMI_INTR_EN_SINGLE_ECC, &qmi_rg->fmqm_ie);
+ if (cfg->exceptions & EX_QMI_SINGLE_ECC)
+ tmp_reg |= QMI_INTR_EN_SINGLE_ECC;
+ /* enable events */
+ iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
+}
+
+static int enable(struct fman *fman, struct fman_cfg *cfg)
+{
+ u32 cfg_reg = 0;
+
+ /* Enable all modules */
+
+ /* clear and enable global counters - calculate the register value
+ * and save it for later, because the same register also enables QMI
+ */
+ cfg_reg = QMI_CFG_EN_COUNTERS;
+
+ /* Set enqueue and dequeue thresholds */
+ cfg_reg |= (cfg->qmi_def_tnums_thresh << 8) | cfg->qmi_def_tnums_thresh;
+
+ iowrite32be(BMI_INIT_START, &fman->bmi_regs->fmbm_init);
+ iowrite32be(cfg_reg | QMI_CFG_ENQ_EN | QMI_CFG_DEQ_EN,
+ &fman->qmi_regs->fmqm_gc);
+
+ return 0;
+}
+
+static int set_exception(struct fman *fman,
+ enum fman_exceptions exception, bool enable)
+{
+ u32 tmp;
+
+ switch (exception) {
+ case FMAN_EX_DMA_BUS_ERROR:
+ tmp = ioread32be(&fman->dma_regs->fmdmmr);
+ if (enable)
+ tmp |= DMA_MODE_BER;
+ else
+ tmp &= ~DMA_MODE_BER;
+ /* apply the new bus error setting */
+ iowrite32be(tmp, &fman->dma_regs->fmdmmr);
+ break;
+ case FMAN_EX_DMA_READ_ECC:
+ case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
+ case FMAN_EX_DMA_FM_WRITE_ECC:
+ tmp = ioread32be(&fman->dma_regs->fmdmmr);
+ if (enable)
+ tmp |= DMA_MODE_ECC;
+ else
+ tmp &= ~DMA_MODE_ECC;
+ iowrite32be(tmp, &fman->dma_regs->fmdmmr);
+ break;
+ case FMAN_EX_FPM_STALL_ON_TASKS:
+ tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
+ if (enable)
+ tmp |= FPM_EV_MASK_STALL_EN;
+ else
+ tmp &= ~FPM_EV_MASK_STALL_EN;
+ iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
+ break;
+ case FMAN_EX_FPM_SINGLE_ECC:
+ tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
+ if (enable)
+ tmp |= FPM_EV_MASK_SINGLE_ECC_EN;
+ else
+ tmp &= ~FPM_EV_MASK_SINGLE_ECC_EN;
+ iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
+ break;
+ case FMAN_EX_FPM_DOUBLE_ECC:
+ tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
+ if (enable)
+ tmp |= FPM_EV_MASK_DOUBLE_ECC_EN;
+ else
+ tmp &= ~FPM_EV_MASK_DOUBLE_ECC_EN;
+ iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
+ break;
+ case FMAN_EX_QMI_SINGLE_ECC:
+ tmp = ioread32be(&fman->qmi_regs->fmqm_ien);
+ if (enable)
+ tmp |= QMI_INTR_EN_SINGLE_ECC;
+ else
+ tmp &= ~QMI_INTR_EN_SINGLE_ECC;
+ iowrite32be(tmp, &fman->qmi_regs->fmqm_ien);
+ break;
+ case FMAN_EX_QMI_DOUBLE_ECC:
+ tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
+ if (enable)
+ tmp |= QMI_ERR_INTR_EN_DOUBLE_ECC;
+ else
+ tmp &= ~QMI_ERR_INTR_EN_DOUBLE_ECC;
+ iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
+ break;
+ case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
+ tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
+ if (enable)
+ tmp |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
+ else
+ tmp &= ~QMI_ERR_INTR_EN_DEQ_FROM_DEF;
+ iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
+ break;
+ case FMAN_EX_BMI_LIST_RAM_ECC:
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
+ if (enable)
+ tmp |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
+ else
+ tmp &= ~BMI_ERR_INTR_EN_LIST_RAM_ECC;
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
+ break;
+ case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
+ if (enable)
+ tmp |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
+ else
+ tmp &= ~BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
+ break;
+ case FMAN_EX_BMI_STATISTICS_RAM_ECC:
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
+ if (enable)
+ tmp |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
+ else
+ tmp &= ~BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
+ break;
+ case FMAN_EX_BMI_DISPATCH_RAM_ECC:
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
+ if (enable)
+ tmp |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
+ else
+ tmp &= ~BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
+ break;
+ case FMAN_EX_IRAM_ECC:
+ tmp = ioread32be(&fman->fpm_regs->fm_rie);
+ if (enable) {
+ /* enable ECC if not enabled */
+ enable_rams_ecc(fman->fpm_regs);
+ /* enable ECC interrupts */
+ tmp |= FPM_IRAM_ECC_ERR_EX_EN;
+ } else {
+ /* ECC mechanism may be disabled,
+ * depending on driver status
+ */
+ disable_rams_ecc(fman->fpm_regs);
+ tmp &= ~FPM_IRAM_ECC_ERR_EX_EN;
+ }
+ iowrite32be(tmp, &fman->fpm_regs->fm_rie);
+ break;
+ case FMAN_EX_MURAM_ECC:
+ tmp = ioread32be(&fman->fpm_regs->fm_rie);
+ if (enable) {
+ /* enable ECC if not enabled */
+ enable_rams_ecc(fman->fpm_regs);
+ /* enable ECC interrupts */
+ tmp |= FPM_MURAM_ECC_ERR_EX_EN;
+ } else {
+ /* ECC mechanism may be disabled,
+ * depending on driver status
+ */
+ disable_rams_ecc(fman->fpm_regs);
+ tmp &= ~FPM_MURAM_ECC_ERR_EX_EN;
+ }
+ iowrite32be(tmp, &fman->fpm_regs->fm_rie);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void resume(struct fman_fpm_regs __iomem *fpm_rg)
+{
+ u32 tmp;
+
+ tmp = ioread32be(&fpm_rg->fmfp_ee);
+ /* clear tmp_reg event bits in order not to clear standing events */
+ tmp &= ~(FPM_EV_MASK_DOUBLE_ECC |
+ FPM_EV_MASK_STALL | FPM_EV_MASK_SINGLE_ECC);
+ tmp |= FPM_EV_MASK_RELEASE_FM;
+
+ iowrite32be(tmp, &fpm_rg->fmfp_ee);
+}
+
+static int fill_soc_specific_params(struct fman_state_struct *state)
+{
+ u8 minor = state->rev_info.minor;
+ /* P4080 - Major 2
+ * P2041/P3041/P5020/P5040 - Major 3
+ * Tx/Bx - Major 6
+ */
+ switch (state->rev_info.major) {
+ case 3:
+ state->bmi_max_fifo_size = 160 * 1024;
+ state->fm_iram_size = 64 * 1024;
+ state->dma_thresh_max_commq = 31;
+ state->dma_thresh_max_buf = 127;
+ state->qmi_max_num_of_tnums = 64;
+ state->qmi_def_tnums_thresh = 48;
+ state->bmi_max_num_of_tasks = 128;
+ state->max_num_of_open_dmas = 32;
+ state->fm_port_num_of_cg = 256;
+ state->num_of_rx_ports = 6;
+ state->total_fifo_size = 122 * 1024;
+ break;
+
+ case 2:
+ state->bmi_max_fifo_size = 160 * 1024;
+ state->fm_iram_size = 64 * 1024;
+ state->dma_thresh_max_commq = 31;
+ state->dma_thresh_max_buf = 127;
+ state->qmi_max_num_of_tnums = 64;
+ state->qmi_def_tnums_thresh = 48;
+ state->bmi_max_num_of_tasks = 128;
+ state->max_num_of_open_dmas = 32;
+ state->fm_port_num_of_cg = 256;
+ state->num_of_rx_ports = 5;
+ state->total_fifo_size = 100 * 1024;
+ break;
+
+ case 6:
+ state->dma_thresh_max_commq = 83;
+ state->dma_thresh_max_buf = 127;
+ state->qmi_max_num_of_tnums = 64;
+ state->qmi_def_tnums_thresh = 32;
+ state->fm_port_num_of_cg = 256;
+
+ /* FManV3L */
+ if (minor == 1 || minor == 4) {
+ state->bmi_max_fifo_size = 192 * 1024;
+ state->bmi_max_num_of_tasks = 64;
+ state->max_num_of_open_dmas = 32;
+ state->num_of_rx_ports = 5;
+ if (minor == 1)
+ state->fm_iram_size = 32 * 1024;
+ else
+ state->fm_iram_size = 64 * 1024;
+ state->total_fifo_size = 156 * 1024;
+ }
+ /* FManV3H */
+ else if (minor == 0 || minor == 2 || minor == 3) {
+ state->bmi_max_fifo_size = 384 * 1024;
+ state->fm_iram_size = 64 * 1024;
+ state->bmi_max_num_of_tasks = 128;
+ state->max_num_of_open_dmas = 84;
+ state->num_of_rx_ports = 8;
+ state->total_fifo_size = 295 * 1024;
+ } else {
+ pr_err("Unsupported FManv3 version\n");
+ return -EINVAL;
+ }
+
+ break;
+ default:
+ pr_err("Unsupported FMan version\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool is_init_done(struct fman_cfg *cfg)
+{
+ /* The config structure is freed at the end of fman_init(), so a
+ * NULL cfg means the FMan driver parameters were initialized
+ */
+ if (!cfg)
+ return true;
+
+ return false;
+}
+
+static void free_init_resources(struct fman *fman)
+{
+ if (fman->cam_offset)
+ fman_muram_free_mem(fman->muram, fman->cam_offset,
+ fman->cam_size);
+ if (fman->fifo_offset)
+ fman_muram_free_mem(fman->muram, fman->fifo_offset,
+ fman->fifo_size);
+}
+
+static irqreturn_t bmi_err_event(struct fman *fman)
+{
+ u32 event, mask, force;
+ struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+ irqreturn_t ret = IRQ_NONE;
+
+ event = ioread32be(&bmi_rg->fmbm_ievr);
+ mask = ioread32be(&bmi_rg->fmbm_ier);
+ event &= mask;
+ /* clear the forced events */
+ force = ioread32be(&bmi_rg->fmbm_ifr);
+ if (force & event)
+ iowrite32be(force & ~event, &bmi_rg->fmbm_ifr);
+ /* clear the acknowledged events */
+ iowrite32be(event, &bmi_rg->fmbm_ievr);
+
+ if (event & BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC);
+ if (event & BMI_ERR_INTR_EN_LIST_RAM_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_LIST_RAM_ECC);
+ if (event & BMI_ERR_INTR_EN_STATISTICS_RAM_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC);
+ if (event & BMI_ERR_INTR_EN_DISPATCH_RAM_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC);
+
+ return ret;
+}
+
+static irqreturn_t qmi_err_event(struct fman *fman)
+{
+ u32 event, mask, force;
+ struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
+ irqreturn_t ret = IRQ_NONE;
+
+ event = ioread32be(&qmi_rg->fmqm_eie);
+ mask = ioread32be(&qmi_rg->fmqm_eien);
+ event &= mask;
+
+ /* clear the forced events */
+ force = ioread32be(&qmi_rg->fmqm_eif);
+ if (force & event)
+ iowrite32be(force & ~event, &qmi_rg->fmqm_eif);
+ /* clear the acknowledged events */
+ iowrite32be(event, &qmi_rg->fmqm_eie);
+
+ if (event & QMI_ERR_INTR_EN_DOUBLE_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_QMI_DOUBLE_ECC);
+ if (event & QMI_ERR_INTR_EN_DEQ_FROM_DEF)
+ ret = fman->exception_cb(fman,
+ FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID);
+
+ return ret;
+}
+
+static irqreturn_t dma_err_event(struct fman *fman)
+{
+ u32 status, mask, com_id;
+ u8 tnum, port_id, relative_port_id;
+ u16 liodn;
+ struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
+ irqreturn_t ret = IRQ_NONE;
+
+ status = ioread32be(&dma_rg->fmdmsr);
+ mask = ioread32be(&dma_rg->fmdmmr);
+
+ /* clear DMA_STATUS_BUS_ERR if mask has no DMA_MODE_BER */
+ if ((mask & DMA_MODE_BER) != DMA_MODE_BER)
+ status &= ~DMA_STATUS_BUS_ERR;
+
+ /* clear relevant bits if mask has no DMA_MODE_ECC */
+ if ((mask & DMA_MODE_ECC) != DMA_MODE_ECC)
+ status &= ~(DMA_STATUS_FM_SPDAT_ECC |
+ DMA_STATUS_READ_ECC |
+ DMA_STATUS_SYSTEM_WRITE_ECC |
+ DMA_STATUS_FM_WRITE_ECC);
+
+ /* clear set events */
+ iowrite32be(status, &dma_rg->fmdmsr);
+
+ if (status & DMA_STATUS_BUS_ERR) {
+ u64 addr;
+
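+ /* reassemble the 64-bit faulting address from its two halves */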
+ addr = (u64)ioread32be(&dma_rg->fmdmtal);
+ addr |= ((u64)(ioread32be(&dma_rg->fmdmtah)) << 32);
+
+ com_id = ioread32be(&dma_rg->fmdmtcid);
+ port_id = (u8)(((com_id & DMA_TRANSFER_PORTID_MASK) >>
+ DMA_TRANSFER_PORTID_SHIFT));
+ relative_port_id =
+ hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);
+ tnum = (u8)((com_id & DMA_TRANSFER_TNUM_MASK) >>
+ DMA_TRANSFER_TNUM_SHIFT);
+ liodn = (u16)(com_id & DMA_TRANSFER_LIODN_MASK);
+ ret = fman->bus_error_cb(fman, relative_port_id, addr, tnum,
+ liodn);
+ }
+ if (status & DMA_STATUS_FM_SPDAT_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_SINGLE_PORT_ECC);
+ if (status & DMA_STATUS_READ_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_READ_ECC);
+ if (status & DMA_STATUS_SYSTEM_WRITE_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC);
+ if (status & DMA_STATUS_FM_WRITE_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_FM_WRITE_ECC);
+
+ return ret;
+}
+
+static irqreturn_t fpm_err_event(struct fman *fman)
+{
+ u32 event;
+ struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+ irqreturn_t ret = IRQ_NONE;
+
+ event = ioread32be(&fpm_rg->fmfp_ee);
+ /* clear all occurred events */
+ iowrite32be(event, &fpm_rg->fmfp_ee);
+
+ if ((event & FPM_EV_MASK_DOUBLE_ECC) &&
+ (event & FPM_EV_MASK_DOUBLE_ECC_EN))
+ ret = fman->exception_cb(fman, FMAN_EX_FPM_DOUBLE_ECC);
+ if ((event & FPM_EV_MASK_STALL) && (event & FPM_EV_MASK_STALL_EN))
+ ret = fman->exception_cb(fman, FMAN_EX_FPM_STALL_ON_TASKS);
+ if ((event & FPM_EV_MASK_SINGLE_ECC) &&
+ (event & FPM_EV_MASK_SINGLE_ECC_EN))
+ ret = fman->exception_cb(fman, FMAN_EX_FPM_SINGLE_ECC);
+
+ return ret;
+}
+
+static irqreturn_t muram_err_intr(struct fman *fman)
+{
+ u32 event, mask;
+ struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+ irqreturn_t ret = IRQ_NONE;
+
+ event = ioread32be(&fpm_rg->fm_rcr);
+ mask = ioread32be(&fpm_rg->fm_rie);
+
+ /* clear MURAM event bit (do not clear IRAM event) */
+ iowrite32be(event & ~FPM_RAM_IRAM_ECC, &fpm_rg->fm_rcr);
+
+ if ((mask & FPM_MURAM_ECC_ERR_EX_EN) && (event & FPM_RAM_MURAM_ECC))
+ ret = fman->exception_cb(fman, FMAN_EX_MURAM_ECC);
+
+ return ret;
+}
+
+static irqreturn_t qmi_event(struct fman *fman)
+{
+ u32 event, mask, force;
+ struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
+ irqreturn_t ret = IRQ_NONE;
+
+ event = ioread32be(&qmi_rg->fmqm_ie);
+ mask = ioread32be(&qmi_rg->fmqm_ien);
+ event &= mask;
+ /* clear the forced events */
+ force = ioread32be(&qmi_rg->fmqm_if);
+ if (force & event)
+ iowrite32be(force & ~event, &qmi_rg->fmqm_if);
+ /* clear the acknowledged events */
+ iowrite32be(event, &qmi_rg->fmqm_ie);
+
+ if (event & QMI_INTR_EN_SINGLE_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_QMI_SINGLE_ECC);
+
+ return ret;
+}
+
+static void enable_time_stamp(struct fman *fman)
+{
+ struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+ u16 fm_clk_freq = fman->state->fm_clk_freq;
+ u32 tmp, intgr, ts_freq;
+ u64 frac;
+
+ ts_freq = (u32)(1 << fman->state->count1_micro_bit);
+ /* configure timestamp so that bit 8 will count 1 microsecond
+ * Find effective count rate at TIMESTAMP least significant bits:
+ * Effective_Count_Rate = 1MHz x 2^8 = 256MHz
+ * Find frequency ratio between effective count rate and the clock:
+ * Effective_Count_Rate / CLK e.g. for 600 MHz clock:
+ * 256/600 = 0.4266666...
+ */
+
+ intgr = ts_freq / fm_clk_freq;
+ /* multiply by 2^16 to keep the fraction of the division; it is not
+ * divided back, since the hardware expects this value as a 16-bit
+ * fraction - see spec
+ */
+
+ frac = ((ts_freq << 16) - (intgr << 16) * fm_clk_freq) / fm_clk_freq;
+ /* round up if the division leaves a remainder */
+ if (((ts_freq << 16) - (intgr << 16) * fm_clk_freq) % fm_clk_freq)
+ frac++;
+
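+ /* example, for the 600 MHz clock used in the comment above:
+ * ts_freq = 1 << 8 = 256, so intgr = 256 / 600 = 0 and
+ * frac = (256 << 16) / 600 rounded up = 27963, i.e. ~0.4267 * 2^16
+ */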
+ tmp = (intgr << FPM_TS_INT_SHIFT) | (u16)frac;
+ iowrite32be(tmp, &fpm_rg->fmfp_tsc2);
+
+ /* enable timestamp with original clock */
+ iowrite32be(FPM_TS_CTL_EN, &fpm_rg->fmfp_tsc1);
+ fman->state->enabled_time_stamp = true;
+}
+
+static int clear_iram(struct fman *fman)
+{
+ struct fman_iram_regs __iomem *iram;
+ int i, count;
+
+ iram = fman->base_addr + IMEM_OFFSET;
+
+ /* Enable the auto-increment */
+ iowrite32be(IRAM_IADD_AIE, &iram->iadd);
+ count = 100;
+ do {
+ udelay(1);
+ } while ((ioread32be(&iram->iadd) != IRAM_IADD_AIE) && --count);
+ if (count == 0)
+ return -EBUSY;
+
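+ /* fill the entire IRAM with a known pattern via the
+ * auto-incrementing data register
+ */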
+ for (i = 0; i < (fman->state->fm_iram_size / 4); i++)
+ iowrite32be(0xffffffff, &iram->idata);
+
+ iowrite32be(fman->state->fm_iram_size - 4, &iram->iadd);
+ count = 100;
+ do {
+ udelay(1);
+ } while ((ioread32be(&iram->idata) != 0xffffffff) && --count);
+ if (count == 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static u32 get_exception_flag(enum fman_exceptions exception)
+{
+ u32 bit_mask;
+
+ switch (exception) {
+ case FMAN_EX_DMA_BUS_ERROR:
+ bit_mask = EX_DMA_BUS_ERROR;
+ break;
+ case FMAN_EX_DMA_SINGLE_PORT_ECC:
+ bit_mask = EX_DMA_SINGLE_PORT_ECC;
+ break;
+ case FMAN_EX_DMA_READ_ECC:
+ bit_mask = EX_DMA_READ_ECC;
+ break;
+ case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
+ bit_mask = EX_DMA_SYSTEM_WRITE_ECC;
+ break;
+ case FMAN_EX_DMA_FM_WRITE_ECC:
+ bit_mask = EX_DMA_FM_WRITE_ECC;
+ break;
+ case FMAN_EX_FPM_STALL_ON_TASKS:
+ bit_mask = EX_FPM_STALL_ON_TASKS;
+ break;
+ case FMAN_EX_FPM_SINGLE_ECC:
+ bit_mask = EX_FPM_SINGLE_ECC;
+ break;
+ case FMAN_EX_FPM_DOUBLE_ECC:
+ bit_mask = EX_FPM_DOUBLE_ECC;
+ break;
+ case FMAN_EX_QMI_SINGLE_ECC:
+ bit_mask = EX_QMI_SINGLE_ECC;
+ break;
+ case FMAN_EX_QMI_DOUBLE_ECC:
+ bit_mask = EX_QMI_DOUBLE_ECC;
+ break;
+ case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
+ bit_mask = EX_QMI_DEQ_FROM_UNKNOWN_PORTID;
+ break;
+ case FMAN_EX_BMI_LIST_RAM_ECC:
+ bit_mask = EX_BMI_LIST_RAM_ECC;
+ break;
+ case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
+ bit_mask = EX_BMI_STORAGE_PROFILE_ECC;
+ break;
+ case FMAN_EX_BMI_STATISTICS_RAM_ECC:
+ bit_mask = EX_BMI_STATISTICS_RAM_ECC;
+ break;
+ case FMAN_EX_BMI_DISPATCH_RAM_ECC:
+ bit_mask = EX_BMI_DISPATCH_RAM_ECC;
+ break;
+ case FMAN_EX_MURAM_ECC:
+ bit_mask = EX_MURAM_ECC;
+ break;
+ default:
+ bit_mask = 0;
+ break;
+ }
+
+ return bit_mask;
+}
+
+static int get_module_event(enum fman_event_modules module, u8 mod_id,
+ enum fman_intr_type intr_type)
+{
+ int event;
+
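+ /* FMAN_EV_CNT is returned as an "invalid event" marker */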
+ switch (module) {
+ case FMAN_MOD_MAC:
+ if (intr_type == FMAN_INTR_TYPE_ERR)
+ event = FMAN_EV_ERR_MAC0 + mod_id;
+ else
+ event = FMAN_EV_MAC0 + mod_id;
+ break;
+ case FMAN_MOD_FMAN_CTRL:
+ if (intr_type == FMAN_INTR_TYPE_ERR)
+ event = FMAN_EV_CNT;
+ else
+ event = (FMAN_EV_FMAN_CTRL_0 + mod_id);
+ break;
+ case FMAN_MOD_DUMMY_LAST:
+ event = FMAN_EV_CNT;
+ break;
+ default:
+ event = FMAN_EV_CNT;
+ break;
+ }
+
+ return event;
+}
+
+static int set_size_of_fifo(struct fman *fman, u8 port_id, u32 *size_of_fifo,
+ u32 *extra_size_of_fifo)
+{
+ struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+ u32 fifo = *size_of_fifo;
+ u32 extra_fifo = *extra_size_of_fifo;
+ u32 tmp;
+
+ /* if this is the first time a port requires extra_fifo_pool_size,
+ * the total extra_fifo_pool_size must be initialized to 1 buffer per
+ * port
+ */
+ if (extra_fifo && !fman->state->extra_fifo_pool_size)
+ fman->state->extra_fifo_pool_size =
+ fman->state->num_of_rx_ports * FMAN_BMI_FIFO_UNITS;
+
+ fman->state->extra_fifo_pool_size =
+ max(fman->state->extra_fifo_pool_size, extra_fifo);
+
+ /* check that there is enough uncommitted FIFO size */
+ if ((fman->state->accumulated_fifo_size + fifo) >
+ (fman->state->total_fifo_size -
+ fman->state->extra_fifo_pool_size)) {
+ dev_err(fman->dev, "%s: Requested fifo size and extra size exceed total FIFO size.\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ /* Read, modify and write to HW */
+ tmp = (fifo / FMAN_BMI_FIFO_UNITS - 1) |
+ ((extra_fifo / FMAN_BMI_FIFO_UNITS) <<
+ BMI_EXTRA_FIFO_SIZE_SHIFT);
+ iowrite32be(tmp, &bmi_rg->fmbm_pfs[port_id - 1]);
+
+ /* update accumulated */
+ fman->state->accumulated_fifo_size += fifo;
+
+ return 0;
+}
+
+static int set_num_of_tasks(struct fman *fman, u8 port_id, u8 *num_of_tasks,
+ u8 *num_of_extra_tasks)
+{
+ struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+ u8 tasks = *num_of_tasks;
+ u8 extra_tasks = *num_of_extra_tasks;
+ u32 tmp;
+
+ if (extra_tasks)
+ fman->state->extra_tasks_pool_size =
+ max(fman->state->extra_tasks_pool_size, extra_tasks);
+
+ /* check that there are enough uncommitted tasks */
+ if ((fman->state->accumulated_num_of_tasks + tasks) >
+ (fman->state->total_num_of_tasks -
+ fman->state->extra_tasks_pool_size)) {
+ dev_err(fman->dev, "%s: Requested num_of_tasks and extra tasks pool for fm%d exceed total num_of_tasks.\n",
+ __func__, fman->state->fm_id);
+ return -EAGAIN;
+ }
+ /* update accumulated */
+ fman->state->accumulated_num_of_tasks += tasks;
+
+ /* Write to HW */
+ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
+ ~(BMI_NUM_OF_TASKS_MASK | BMI_NUM_OF_EXTRA_TASKS_MASK);
+ tmp |= ((u32)((tasks - 1) << BMI_NUM_OF_TASKS_SHIFT) |
+ (u32)(extra_tasks << BMI_EXTRA_NUM_OF_TASKS_SHIFT));
+ iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
+
+ return 0;
+}
+
+static int set_num_of_open_dmas(struct fman *fman, u8 port_id,
+ u8 *num_of_open_dmas,
+ u8 *num_of_extra_open_dmas)
+{
+ struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+ u8 open_dmas = *num_of_open_dmas;
+ u8 extra_open_dmas = *num_of_extra_open_dmas;
+ u8 total_num_dmas = 0, current_val = 0, current_extra_val = 0;
+ u32 tmp;
+
+ if (!open_dmas) {
+ /* Configuration according to the values in the HW:
+ * read the current number of open DMAs
+ */
+ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
+ current_extra_val = (u8)((tmp & BMI_NUM_OF_EXTRA_DMAS_MASK) >>
+ BMI_EXTRA_NUM_OF_DMAS_SHIFT);
+
+ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
+ current_val = (u8)(((tmp & BMI_NUM_OF_DMAS_MASK) >>
+ BMI_NUM_OF_DMAS_SHIFT) + 1);
+
+ /* This is the first configuration and the user did not
+ * specify a value (!open_dmas); the reset values will be
+ * used and are simply saved here for resource management
+ */
+ fman->state->extra_open_dmas_pool_size =
+ (u8)max(fman->state->extra_open_dmas_pool_size,
+ current_extra_val);
+ fman->state->accumulated_num_of_open_dmas += current_val;
+ *num_of_open_dmas = current_val;
+ *num_of_extra_open_dmas = current_extra_val;
+ return 0;
+ }
+
+ if (extra_open_dmas > current_extra_val)
+ fman->state->extra_open_dmas_pool_size =
+ (u8)max(fman->state->extra_open_dmas_pool_size,
+ extra_open_dmas);
+
+ if ((fman->state->rev_info.major < 6) &&
+ (fman->state->accumulated_num_of_open_dmas - current_val +
+ open_dmas > fman->state->max_num_of_open_dmas)) {
+ dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds total num_of_open_dmas.\n",
+ __func__, fman->state->fm_id);
+ return -EAGAIN;
+ } else if ((fman->state->rev_info.major >= 6) &&
+ !((fman->state->rev_info.major == 6) &&
+ (fman->state->rev_info.minor == 0)) &&
+ (fman->state->accumulated_num_of_open_dmas -
+ current_val + open_dmas >
+ fman->state->dma_thresh_max_commq + 1)) {
+ dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds DMA Command queue (%d)\n",
+ __func__, fman->state->fm_id,
+ fman->state->dma_thresh_max_commq + 1);
+ return -EAGAIN;
+ }
+
+ WARN_ON(fman->state->accumulated_num_of_open_dmas < current_val);
+ /* update accumulated */
+ fman->state->accumulated_num_of_open_dmas -= current_val;
+ fman->state->accumulated_num_of_open_dmas += open_dmas;
+
+ if (fman->state->rev_info.major < 6)
+ total_num_dmas =
+ (u8)(fman->state->accumulated_num_of_open_dmas +
+ fman->state->extra_open_dmas_pool_size);
+
+ /* calculate reg */
+ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
+ ~(BMI_NUM_OF_DMAS_MASK | BMI_NUM_OF_EXTRA_DMAS_MASK);
+ tmp |= (u32)(((open_dmas - 1) << BMI_NUM_OF_DMAS_SHIFT) |
+ (extra_open_dmas << BMI_EXTRA_NUM_OF_DMAS_SHIFT));
+ iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
+
+ /* update the total number of DMAs with the committed number of
+ * open DMAs and the max uncommitted pool
+ */
+ if (total_num_dmas) {
+ tmp = ioread32be(&bmi_rg->fmbm_cfg2) & ~BMI_CFG2_DMAS_MASK;
+ tmp |= (u32)(total_num_dmas - 1) << BMI_CFG2_DMAS_SHIFT;
+ iowrite32be(tmp, &bmi_rg->fmbm_cfg2);
+ }
+
+ return 0;
+}
+
+static int fman_config(struct fman *fman)
+{
+ void __iomem *base_addr;
+ int err;
+
+ base_addr = fman->dts_params.base_addr;
+
+ fman->state = kzalloc(sizeof(*fman->state), GFP_KERNEL);
+ if (!fman->state)
+ goto err_fm_state;
+
+ /* Allocate the FM driver's parameters structure */
+ fman->cfg = kzalloc(sizeof(*fman->cfg), GFP_KERNEL);
+ if (!fman->cfg)
+ goto err_fm_drv;
+
+ /* Initialize MURAM block */
+ fman->muram =
+ fman_muram_init(fman->dts_params.muram_res.start,
+ resource_size(&fman->dts_params.muram_res));
+ if (!fman->muram)
+ goto err_fm_soc_specific;
+
+ /* Initialize FM parameters which will be kept by the driver */
+ fman->state->fm_id = fman->dts_params.id;
+ fman->state->fm_clk_freq = fman->dts_params.clk_freq;
+ fman->state->qman_channel_base = fman->dts_params.qman_channel_base;
+ fman->state->num_of_qman_channels =
+ fman->dts_params.num_of_qman_channels;
+ fman->state->res = fman->dts_params.res;
+ fman->exception_cb = fman_exceptions;
+ fman->bus_error_cb = fman_bus_error;
+ fman->fpm_regs = base_addr + FPM_OFFSET;
+ fman->bmi_regs = base_addr + BMI_OFFSET;
+ fman->qmi_regs = base_addr + QMI_OFFSET;
+ fman->dma_regs = base_addr + DMA_OFFSET;
+ fman->base_addr = base_addr;
+
+ spin_lock_init(&fman->spinlock);
+ fman_defconfig(fman->cfg);
+
+ fman->state->extra_fifo_pool_size = 0;
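+ /* exceptions enabled by default at init time */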
+ fman->state->exceptions = (EX_DMA_BUS_ERROR |
+ EX_DMA_READ_ECC |
+ EX_DMA_SYSTEM_WRITE_ECC |
+ EX_DMA_FM_WRITE_ECC |
+ EX_FPM_STALL_ON_TASKS |
+ EX_FPM_SINGLE_ECC |
+ EX_FPM_DOUBLE_ECC |
+ EX_QMI_DEQ_FROM_UNKNOWN_PORTID |
+ EX_BMI_LIST_RAM_ECC |
+ EX_BMI_STORAGE_PROFILE_ECC |
+ EX_BMI_STATISTICS_RAM_ECC |
+ EX_MURAM_ECC |
+ EX_BMI_DISPATCH_RAM_ECC |
+ EX_QMI_DOUBLE_ECC |
+ EX_QMI_SINGLE_ECC);
+
+ /* Read FMan revision for future use */
+ fman_get_revision(fman, &fman->state->rev_info);
+
+ err = fill_soc_specific_params(fman->state);
+ if (err)
+ goto err_fm_soc_specific;
+
+ /* FM_AID_MODE_NO_TNUM_SW005 Errata workaround */
+ if (fman->state->rev_info.major >= 6)
+ fman->cfg->dma_aid_mode = FMAN_DMA_AID_OUT_PORT_ID;
+
+ fman->cfg->qmi_def_tnums_thresh = fman->state->qmi_def_tnums_thresh;
+
+ fman->state->total_num_of_tasks =
+ (u8)DFLT_TOTAL_NUM_OF_TASKS(fman->state->rev_info.major,
+ fman->state->rev_info.minor,
+ fman->state->bmi_max_num_of_tasks);
+
+ if (fman->state->rev_info.major < 6) {
+ fman->cfg->dma_comm_qtsh_clr_emer =
+ (u8)DFLT_DMA_COMM_Q_LOW(fman->state->rev_info.major,
+ fman->state->dma_thresh_max_commq);
+
+ fman->cfg->dma_comm_qtsh_asrt_emer =
+ (u8)DFLT_DMA_COMM_Q_HIGH(fman->state->rev_info.major,
+ fman->state->dma_thresh_max_commq);
+
+ fman->cfg->dma_cam_num_of_entries =
+ DFLT_DMA_CAM_NUM_OF_ENTRIES(fman->state->rev_info.major);
+
+ fman->cfg->dma_read_buf_tsh_clr_emer =
+ DFLT_DMA_READ_INT_BUF_LOW(fman->state->dma_thresh_max_buf);
+
+ fman->cfg->dma_read_buf_tsh_asrt_emer =
+ DFLT_DMA_READ_INT_BUF_HIGH(fman->state->dma_thresh_max_buf);
+
+ fman->cfg->dma_write_buf_tsh_clr_emer =
+ DFLT_DMA_WRITE_INT_BUF_LOW(fman->state->dma_thresh_max_buf);
+
+ fman->cfg->dma_write_buf_tsh_asrt_emer =
+ DFLT_DMA_WRITE_INT_BUF_HIGH(fman->state->dma_thresh_max_buf);
+
+ fman->cfg->dma_axi_dbg_num_of_beats =
+ DFLT_AXI_DBG_NUM_OF_BEATS;
+ }
+
+ return 0;
+
+err_fm_soc_specific:
+ kfree(fman->cfg);
+err_fm_drv:
+ kfree(fman->state);
+err_fm_state:
+ kfree(fman);
+ return -EINVAL;
+}
+
+static int fman_init(struct fman *fman)
+{
+ struct fman_cfg *cfg = NULL;
+ int err = 0, i, count;
+
+ if (is_init_done(fman->cfg))
+ return -EINVAL;
+
+ fman->state->count1_micro_bit = FM_TIMESTAMP_1_USEC_BIT;
+
+ cfg = fman->cfg;
+
+ /* clear revision-dependent non-existing exceptions */
+ if (fman->state->rev_info.major < 6)
+ fman->state->exceptions &= ~EX_BMI_DISPATCH_RAM_ECC;
+
+ if (fman->state->rev_info.major >= 6)
+ fman->state->exceptions &= ~EX_QMI_SINGLE_ECC;
+
+ /* clear CGP (congestion groups page) */
+ memset_io((void __iomem *)(fman->base_addr + CGP_OFFSET), 0,
+ fman->state->fm_port_num_of_cg);
+
+ /* Save LIODN info before FMan reset
+ * Skipping non-existent port 0 (i = 1)
+ */
+ for (i = 1; i < FMAN_LIODN_TBL; i++) {
+ u32 liodn_base;
+
+ fman->liodn_offset[i] =
+ ioread32be(&fman->bmi_regs->fmbm_spliodn[i - 1]);
+ liodn_base = ioread32be(&fman->dma_regs->fmdmplr[i / 2]);
+ if (i % 2) {
+ /* FMDM_PLR LSB holds LIODN base for odd ports */
+ liodn_base &= DMA_LIODN_BASE_MASK;
+ } else {
+ /* FMDM_PLR MSB holds LIODN base for even ports */
+ liodn_base >>= DMA_LIODN_SHIFT;
+ liodn_base &= DMA_LIODN_BASE_MASK;
+ }
+ fman->liodn_base[i] = liodn_base;
+ }
+
+ /* FMan Reset (supported only for FMan V2) */
+ if (fman->state->rev_info.major >= 6) {
+ /* Errata A007273 */
+ dev_dbg(fman->dev, "%s: FManV3 reset is not supported!\n",
+ __func__);
+ } else {
+ iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
+ /* Wait for reset completion */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
+ FPM_RSTC_FM_RESET) && --count);
+ if (count == 0)
+ return -EBUSY;
+ }
+
+ if (ioread32be(&fman->qmi_regs->fmqm_gs) & QMI_GS_HALT_NOT_BUSY) {
+ resume(fman->fpm_regs);
+ /* Wait until QMI exits the "halt not busy" state */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&fman->qmi_regs->fmqm_gs)) &
+ QMI_GS_HALT_NOT_BUSY) && --count);
+ if (count == 0)
+ dev_warn(fman->dev, "%s: QMI is in halt not busy state\n",
+ __func__);
+ }
+
+ if (clear_iram(fman) != 0)
+ return -EINVAL;
+
+ cfg->exceptions = fman->state->exceptions;
+
+ /* Init DMA Registers */
+
+ err = dma_init(fman);
+ if (err != 0) {
+ free_init_resources(fman);
+ return err;
+ }
+
+ /* Init FPM Registers */
+ fpm_init(fman->fpm_regs, fman->cfg);
+
+ /* define common resources */
+ /* allocate MURAM for FIFO according to total size */
+ fman->fifo_offset = fman_muram_alloc(fman->muram,
+ fman->state->total_fifo_size);
+ if (IS_ERR_VALUE(fman->fifo_offset)) {
+ free_init_resources(fman);
+ dev_err(fman->dev, "%s: MURAM alloc for BMI FIFO failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ cfg->fifo_base_addr = fman->fifo_offset;
+ cfg->total_fifo_size = fman->state->total_fifo_size;
+ cfg->total_num_of_tasks = fman->state->total_num_of_tasks;
+ cfg->clk_freq = fman->state->fm_clk_freq;
+
+ /* Init BMI Registers */
+ bmi_init(fman->bmi_regs, fman->cfg);
+
+ /* Init QMI Registers */
+ qmi_init(fman->qmi_regs, fman->cfg);
+
+ err = enable(fman, cfg);
+ if (err != 0)
+ return err;
+
+ enable_time_stamp(fman);
+
+ kfree(fman->cfg);
+ fman->cfg = NULL;
+
+ return 0;
+}
+
+static int fman_set_exception(struct fman *fman,
+ enum fman_exceptions exception, bool enable)
+{
+ u32 bit_mask = 0;
+
+ if (!is_init_done(fman->cfg))
+ return -EINVAL;
+
+ bit_mask = get_exception_flag(exception);
+ if (bit_mask) {
+ if (enable)
+ fman->state->exceptions |= bit_mask;
+ else
+ fman->state->exceptions &= ~bit_mask;
+ } else {
+ dev_err(fman->dev, "%s: Undefined exception (%d)\n",
+ __func__, exception);
+ return -EINVAL;
+ }
+
+ return set_exception(fman, exception, enable);
+}
+
+/**
+ * fman_register_intr
+ * @fman: A Pointer to FMan device
+ * @module: Calling module
+ * @mod_id: Module id (if more than 1 exists, '0' if not)
+ * @intr_type: Interrupt type (error/normal) selection.
+ * @isr_cb: The interrupt service routine.
+ * @src_arg: Argument to be passed to isr_cb.
+ *
+ * Used to register an event handler to be processed by FMan
+ */
+void fman_register_intr(struct fman *fman, enum fman_event_modules module,
+ u8 mod_id, enum fman_intr_type intr_type,
+ void (*isr_cb)(void *src_arg), void *src_arg)
+{
+ int event = 0;
+
+ event = get_module_event(module, mod_id, intr_type);
+ WARN_ON(event >= FMAN_EV_CNT);
+
+ /* register in local FM structure */
+ fman->intr_mng[event].isr_cb = isr_cb;
+ fman->intr_mng[event].src_handle = src_arg;
+}
+
+/**
+ * fman_unregister_intr
+ * @fman: A Pointer to FMan device
+ * @module: Calling module
+ * @mod_id: Module id (if more than 1 exists, '0' if not)
+ * @intr_type: Interrupt type (error/normal) selection.
+ *
+ * Used to unregister an event handler processed by FMan
+ */
+void fman_unregister_intr(struct fman *fman, enum fman_event_modules module,
+ u8 mod_id, enum fman_intr_type intr_type)
+{
+ int event = 0;
+
+ event = get_module_event(module, mod_id, intr_type);
+ WARN_ON(event >= FMAN_EV_CNT);
+
+ fman->intr_mng[event].isr_cb = NULL;
+ fman->intr_mng[event].src_handle = NULL;
+}
+
+/**
+ * fman_set_port_params
+ * @fman: A Pointer to FMan device
+ * @port_params: Port parameters
+ *
+ * Used by FMan Port to pass parameters to the FMan
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_set_port_params(struct fman *fman,
+ struct fman_port_init_params *port_params)
+{
+ int err;
+ unsigned long flags;
+ u8 port_id = port_params->port_id, mac_id;
+
+ spin_lock_irqsave(&fman->spinlock, flags);
+
+ err = set_num_of_tasks(fman, port_params->port_id,
+ &port_params->num_of_tasks,
+ &port_params->num_of_extra_tasks);
+ if (err)
+ goto return_err;
+
+ /* TX Ports */
+ if (port_params->port_type != FMAN_PORT_TYPE_RX) {
+ u32 enq_th, deq_th, reg;
+
+ /* update qmi ENQ/DEQ threshold */
+ fman->state->accumulated_num_of_deq_tnums +=
+ port_params->deq_pipeline_depth;
+ enq_th = (ioread32be(&fman->qmi_regs->fmqm_gc) &
+ QMI_CFG_ENQ_MASK) >> QMI_CFG_ENQ_SHIFT;
+ /* if enq_th is too big, reduce it to the largest value that
+ * still leaves room for the accumulated dequeue tnums
+ */
+ if (enq_th >= (fman->state->qmi_max_num_of_tnums -
+ fman->state->accumulated_num_of_deq_tnums)) {
+ enq_th =
+ fman->state->qmi_max_num_of_tnums -
+ fman->state->accumulated_num_of_deq_tnums - 1;
+
+ reg = ioread32be(&fman->qmi_regs->fmqm_gc);
+ reg &= ~QMI_CFG_ENQ_MASK;
+ reg |= (enq_th << QMI_CFG_ENQ_SHIFT);
+ iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
+ }
+
+ deq_th = ioread32be(&fman->qmi_regs->fmqm_gc) &
+ QMI_CFG_DEQ_MASK;
+ /* if deq_th is too small, enlarge it to the smallest value that
+ * covers the accumulated dequeue tnums; deq_th may not be larger
+ * than 63 (fman->state->qmi_max_num_of_tnums - 1).
+ */
+ if ((deq_th <= fman->state->accumulated_num_of_deq_tnums) &&
+ (deq_th < fman->state->qmi_max_num_of_tnums - 1)) {
+ deq_th = fman->state->accumulated_num_of_deq_tnums + 1;
+ reg = ioread32be(&fman->qmi_regs->fmqm_gc);
+ reg &= ~QMI_CFG_DEQ_MASK;
+ reg |= deq_th;
+ iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
+ }
+ }
+
+ err = set_size_of_fifo(fman, port_params->port_id,
+ &port_params->size_of_fifo,
+ &port_params->extra_size_of_fifo);
+ if (err)
+ goto return_err;
+
+ err = set_num_of_open_dmas(fman, port_params->port_id,
+ &port_params->num_of_open_dmas,
+ &port_params->num_of_extra_open_dmas);
+ if (err)
+ goto return_err;
+
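+ /* program the port LIODN with the base/offset values saved
+ * before the FMan reset
+ */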
+ set_port_liodn(fman, port_id, fman->liodn_base[port_id],
+ fman->liodn_offset[port_id]);
+
+ if (fman->state->rev_info.major < 6)
+ set_port_order_restoration(fman->fpm_regs, port_id);
+
+ mac_id = hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);
+
+ if (port_params->max_frame_length >= fman->state->mac_mfl[mac_id]) {
+ fman->state->port_mfl[mac_id] = port_params->max_frame_length;
+ } else {
+ dev_warn(fman->dev, "%s: Port (%d) max_frame_length is smaller than MAC (%d) current MTU\n",
+ __func__, port_id, mac_id);
+ err = -EINVAL;
+ goto return_err;
+ }
+
+ spin_unlock_irqrestore(&fman->spinlock, flags);
+
+ return 0;
+
+return_err:
+ spin_unlock_irqrestore(&fman->spinlock, flags);
+ return err;
+}
+
+/**
+ * fman_reset_mac
+ * @fman: A Pointer to FMan device
+ * @mac_id: MAC id to be reset
+ *
+ * Reset a specific MAC
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_reset_mac(struct fman *fman, u8 mac_id)
+{
+ struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+ u32 msk, timeout = 100;
+
+ if (fman->state->rev_info.major >= 6) {
+ dev_err(fman->dev, "%s: FMan MAC reset no available for FMan V3!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Get the relevant bit mask */
+ switch (mac_id) {
+ case 0:
+ msk = FPM_RSTC_MAC0_RESET;
+ break;
+ case 1:
+ msk = FPM_RSTC_MAC1_RESET;
+ break;
+ case 2:
+ msk = FPM_RSTC_MAC2_RESET;
+ break;
+ case 3:
+ msk = FPM_RSTC_MAC3_RESET;
+ break;
+ case 4:
+ msk = FPM_RSTC_MAC4_RESET;
+ break;
+ case 5:
+ msk = FPM_RSTC_MAC5_RESET;
+ break;
+ case 6:
+ msk = FPM_RSTC_MAC6_RESET;
+ break;
+ case 7:
+ msk = FPM_RSTC_MAC7_RESET;
+ break;
+ case 8:
+ msk = FPM_RSTC_MAC8_RESET;
+ break;
+ case 9:
+ msk = FPM_RSTC_MAC9_RESET;
+ break;
+ default:
+ dev_warn(fman->dev, "%s: Illegal MAC Id [%d]\n",
+ __func__, mac_id);
+ return -EINVAL;
+ }
+
+ /* reset */
+ iowrite32be(msk, &fpm_rg->fm_rstc);
+ while ((ioread32be(&fpm_rg->fm_rstc) & msk) && --timeout)
+ udelay(10);
+
+ if (!timeout)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * fman_set_mac_max_frame
+ * @fman: A Pointer to FMan device
+ * @mac_id: MAC id
+ * @mfl: Maximum frame length
+ *
+ * Set maximum frame length of specific MAC in FMan driver
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
+{
+ /* if the port is already initialized, check that MaxFrameLength is
+ * smaller than or equal to the port's max
+ */
+ if (!fman->state->port_mfl[mac_id] ||
+ mfl <= fman->state->port_mfl[mac_id]) {
+ fman->state->mac_mfl[mac_id] = mfl;
+ } else {
+ dev_warn(fman->dev, "%s: MAC max_frame_length is larger than Port max_frame_length\n",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * fman_get_clock_freq
+ * @fman: A Pointer to FMan device
+ *
+ * Get FMan clock frequency
+ *
+ * Return: FMan clock frequency
+ */
+u16 fman_get_clock_freq(struct fman *fman)
+{
+ return fman->state->fm_clk_freq;
+}
+
+/**
+ * fman_get_bmi_max_fifo_size
+ * @fman: A Pointer to FMan device
+ *
+ * Get FMan maximum FIFO size
+ *
+ * Return: FMan Maximum FIFO size
+ */
+u32 fman_get_bmi_max_fifo_size(struct fman *fman)
+{
+ return fman->state->bmi_max_fifo_size;
+}
+
+/**
+ * fman_get_revision
+ * @fman: Pointer to the FMan module
+ * @rev_info: A structure of revision information parameters.
+ *
+ * Fills @rev_info with the FM revision.
+ */
+void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info)
+{
+ u32 tmp;
+
+ tmp = ioread32be(&fman->fpm_regs->fm_ip_rev_1);
+ rev_info->major = (u8)((tmp & FPM_REV1_MAJOR_MASK) >>
+ FPM_REV1_MAJOR_SHIFT);
+ rev_info->minor = tmp & FPM_REV1_MINOR_MASK;
+}
+
+/**
+ * fman_get_qman_channel_id
+ * @fman: A Pointer to FMan device
+ * @port_id: Port id
+ *
+ * Get QMan channel ID associated to the Port id
+ *
+ * Return: QMan channel ID
+ */
+u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
+{
+ int i;
+
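+ /* map the hardware port ID to an index in the per-revision
+ * channel table; the QMan channel is the base plus that index
+ */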
+ if (fman->state->rev_info.major >= 6) {
+ u32 port_ids[] = {0x30, 0x31, 0x28, 0x29, 0x2a, 0x2b,
+ 0x2c, 0x2d, 0x2, 0x3, 0x4, 0x5, 0x7, 0x7};
+ for (i = 0; i < fman->state->num_of_qman_channels; i++) {
+ if (port_ids[i] == port_id)
+ break;
+ }
+ } else {
+ u32 port_ids[] = {0x30, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x1,
+ 0x2, 0x3, 0x4, 0x5, 0x7, 0x7};
+ for (i = 0; i < fman->state->num_of_qman_channels; i++) {
+ if (port_ids[i] == port_id)
+ break;
+ }
+ }
+
+ if (i == fman->state->num_of_qman_channels)
+ return 0;
+
+ return fman->state->qman_channel_base + i;
+}
+
+/**
+ * fman_get_mem_region
+ * @fman: A Pointer to FMan device
+ *
+ * Get FMan memory region
+ *
+ * Return: A structure with FMan memory region information
+ */
+struct resource *fman_get_mem_region(struct fman *fman)
+{
+ return fman->state->res;
+}
+
+/* Bootargs defines */
+/* Extra headroom for RX buffers - Default, min and max */
+#define FSL_FM_RX_EXTRA_HEADROOM 64
+#define FSL_FM_RX_EXTRA_HEADROOM_MIN 16
+#define FSL_FM_RX_EXTRA_HEADROOM_MAX 384
+
+/* Maximum frame length */
+#define FSL_FM_MAX_FRAME_SIZE 1522
+#define FSL_FM_MAX_POSSIBLE_FRAME_SIZE 9600
+#define FSL_FM_MIN_POSSIBLE_FRAME_SIZE 64
+
+/* Extra headroom for Rx buffers.
+ * FMan is instructed to allocate, on the Rx path, this amount of
+ * space at the beginning of a data buffer, beside the DPA private
+ * data area and the IC fields.
+ * Does not impact Tx buffer layout.
+ * Configurable from bootargs. 64 by default; it is needed in
+ * particular forwarding scenarios that add extra headers to the
+ * forwarded frame.
+ */
+int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
+module_param(fsl_fm_rx_extra_headroom, int, 0);
+MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
+
+/* Max frame size, across all interfaces.
+ * Configurable from bootargs, to avoid allocating oversized (socket)
+ * buffers when not using jumbo frames.
+ * Must be large enough to accommodate the network MTU, but small enough
+ * to avoid wasting skb memory.
+ *
+ * Could be overridden once, at boot-time, via the
+ * fm_set_max_frm() callback.
+ */
+int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
+module_param(fsl_fm_max_frm, int, 0);
+MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces");
+
+/**
+ * fman_get_max_frm
+ *
+ * Return: Max frame length configured in the FM driver
+ */
+u16 fman_get_max_frm(void)
+{
+ static bool fm_check_mfl;
+
+ if (!fm_check_mfl) {
+ if (fsl_fm_max_frm > FSL_FM_MAX_POSSIBLE_FRAME_SIZE ||
+ fsl_fm_max_frm < FSL_FM_MIN_POSSIBLE_FRAME_SIZE) {
+ pr_warn("Invalid fsl_fm_max_frm value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n",
+ fsl_fm_max_frm,
+ FSL_FM_MIN_POSSIBLE_FRAME_SIZE,
+ FSL_FM_MAX_POSSIBLE_FRAME_SIZE,
+ FSL_FM_MAX_FRAME_SIZE);
+ fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
+ }
+ fm_check_mfl = true;
+ }
+
+ return fsl_fm_max_frm;
+}
+EXPORT_SYMBOL(fman_get_max_frm);
+
+/**
+ * fman_get_rx_extra_headroom
+ *
+ * Return: Extra headroom size configured in the FM driver
+ */
+int fman_get_rx_extra_headroom(void)
+{
+ static bool fm_check_rx_extra_headroom;
+
+ if (!fm_check_rx_extra_headroom) {
+ if (fsl_fm_rx_extra_headroom > FSL_FM_RX_EXTRA_HEADROOM_MAX ||
+ fsl_fm_rx_extra_headroom < FSL_FM_RX_EXTRA_HEADROOM_MIN) {
+ pr_warn("Invalid fsl_fm_rx_extra_headroom value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n",
+ fsl_fm_rx_extra_headroom,
+ FSL_FM_RX_EXTRA_HEADROOM_MIN,
+ FSL_FM_RX_EXTRA_HEADROOM_MAX,
+ FSL_FM_RX_EXTRA_HEADROOM);
+ fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
+ }
+
+ fm_check_rx_extra_headroom = true;
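+ /* the extra headroom must be a multiple of 16 bytes */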
+ fsl_fm_rx_extra_headroom = ALIGN(fsl_fm_rx_extra_headroom, 16);
+ }
+
+ return fsl_fm_rx_extra_headroom;
+}
+EXPORT_SYMBOL(fman_get_rx_extra_headroom);
+
+/**
+ * fman_bind
+ * @fm_dev: FMan OF device pointer
+ *
+ * Bind to a specific FMan device.
+ *
+ * Allowed only after the port was created.
+ *
+ * Return: A pointer to the FMan device
+ */
+struct fman *fman_bind(struct device *fm_dev)
+{
+ return (struct fman *)(dev_get_drvdata(get_device(fm_dev)));
+}
+
+static irqreturn_t fman_err_irq(int irq, void *handle)
+{
+ struct fman *fman = (struct fman *)handle;
+ u32 pending;
+ struct fman_fpm_regs __iomem *fpm_rg;
+ irqreturn_t single_ret, ret = IRQ_NONE;
+
+ if (!is_init_done(fman->cfg))
+ return IRQ_NONE;
+
+ fpm_rg = fman->fpm_regs;
+
+ /* error interrupts */
+ pending = ioread32be(&fpm_rg->fm_epi);
+ if (!pending)
+ return IRQ_NONE;
+
+ if (pending & ERR_INTR_EN_BMI) {
+ single_ret = bmi_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_QMI) {
+ single_ret = qmi_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_FPM) {
+ single_ret = fpm_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_DMA) {
+ single_ret = dma_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MURAM) {
+ single_ret = muram_err_intr(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ /* MAC error interrupts */
+ if (pending & ERR_INTR_EN_MAC0) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 0);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC1) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 1);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC2) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 2);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC3) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 3);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC4) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 4);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC5) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 5);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC6) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 6);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC7) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 7);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC8) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 8);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC9) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 9);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static irqreturn_t fman_irq(int irq, void *handle)
+{
+ struct fman *fman = (struct fman *)handle;
+ u32 pending;
+ struct fman_fpm_regs __iomem *fpm_rg;
+ irqreturn_t single_ret, ret = IRQ_NONE;
+
+ if (!is_init_done(fman->cfg))
+ return IRQ_NONE;
+
+ fpm_rg = fman->fpm_regs;
+
+ /* normal interrupts */
+ pending = ioread32be(&fpm_rg->fm_npi);
+ if (!pending)
+ return IRQ_NONE;
+
+ if (pending & INTR_EN_QMI) {
+ single_ret = qmi_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ /* MAC interrupts */
+ if (pending & INTR_EN_MAC0) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 0);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC1) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 1);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC2) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 2);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC3) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 3);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC4) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 4);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC5) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 5);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC6) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 6);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC7) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 7);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC8) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 8);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC9) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 9);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static const struct of_device_id fman_muram_match[] = {
+ { .compatible = "fsl,fman-muram" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fman_muram_match);
+
+static struct fman *read_dts_node(struct platform_device *of_dev)
+{
+ struct fman *fman;
+ struct device_node *fm_node, *muram_node;
+ struct resource *res;
+ const u32 *u32_prop;
+ int lenp, err, irq;
+ struct clk *clk;
+ u32 clk_rate;
+ phys_addr_t phys_base_addr;
+ resource_size_t mem_size;
+
+ fman = kzalloc(sizeof(*fman), GFP_KERNEL);
+ if (!fman)
+ return NULL;
+
+ fm_node = of_node_get(of_dev->dev.of_node);
+
+ u32_prop = (const u32 *)of_get_property(fm_node, "cell-index", &lenp);
+ if (!u32_prop) {
+ dev_err(&of_dev->dev, "%s: of_get_property(%s, cell-index) failed\n",
+ __func__, fm_node->full_name);
+ goto fman_node_put;
+ }
+ if (WARN_ON(lenp != sizeof(u32)))
+ goto fman_node_put;
+
+ fman->dts_params.id = (u8)fdt32_to_cpu(u32_prop[0]);
+
+ /* Get the FM interrupt */
+ res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&of_dev->dev, "%s: Can't get FMan IRQ resource\n",
+ __func__);
+ goto fman_node_put;
+ }
+ irq = res->start;
+
+ /* Get the FM error interrupt */
+ res = platform_get_resource(of_dev, IORESOURCE_IRQ, 1);
+ if (!res) {
+ dev_err(&of_dev->dev, "%s: Can't get FMan Error IRQ resource\n",
+ __func__);
+ goto fman_node_put;
+ }
+ fman->dts_params.err_irq = res->start;
+
+ /* Get the FM address */
+ res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&of_dev->dev, "%s: Can't get FMan memory resouce\n",
+ __func__);
+ goto fman_node_put;
+ }
+
+ phys_base_addr = res->start;
+ mem_size = resource_size(res);
+
+ clk = of_clk_get(fm_node, 0);
+ if (IS_ERR(clk)) {
+ dev_err(&of_dev->dev, "%s: Failed to get FM%d clock structure\n",
+ __func__, fman->dts_params.id);
+ goto fman_node_put;
+ }
+
+ clk_rate = clk_get_rate(clk);
+ if (!clk_rate) {
+ dev_err(&of_dev->dev, "%s: Failed to determine FM%d clock rate\n",
+ __func__, fman->dts_params.id);
+ goto fman_node_put;
+ }
+ /* Rounding to MHz */
+ fman->dts_params.clk_freq = DIV_ROUND_UP(clk_rate, 1000000);
+
+ u32_prop = (const u32 *)of_get_property(fm_node,
+ "fsl,qman-channel-range",
+ &lenp);
+ if (!u32_prop) {
+ dev_err(&of_dev->dev, "%s: of_get_property(%s, fsl,qman-channel-range) failed\n",
+ __func__, fm_node->full_name);
+ goto fman_node_put;
+ }
+ if (WARN_ON(lenp != sizeof(u32) * 2))
+ goto fman_node_put;
+ fman->dts_params.qman_channel_base = fdt32_to_cpu(u32_prop[0]);
+ fman->dts_params.num_of_qman_channels = fdt32_to_cpu(u32_prop[1]);
+
+ /* Get the MURAM base address and size */
+ muram_node = of_find_matching_node(fm_node, fman_muram_match);
+ if (!muram_node) {
+ dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
+ __func__);
+ goto fman_node_put;
+ }
+
+ err = of_address_to_resource(muram_node, 0,
+ &fman->dts_params.muram_res);
+ if (err) {
+ of_node_put(muram_node);
+ dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n",
+ __func__, err);
+ goto fman_node_put;
+ }
+
+ of_node_put(muram_node);
+ of_node_put(fm_node);
+
+ err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
+ if (err < 0) {
+ dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
+ __func__, irq, err);
+ goto fman_free;
+ }
+
+ if (fman->dts_params.err_irq != 0) {
+ err = devm_request_irq(&of_dev->dev, fman->dts_params.err_irq,
+ fman_err_irq, IRQF_SHARED,
+ "fman-err", fman);
+ if (err < 0) {
+ dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
+ __func__, fman->dts_params.err_irq, err);
+ goto fman_free;
+ }
+ }
+
+ fman->dts_params.res =
+ devm_request_mem_region(&of_dev->dev, phys_base_addr,
+ mem_size, "fman");
+ if (!fman->dts_params.res) {
+ dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
+ __func__);
+ goto fman_free;
+ }
+
+ fman->dts_params.base_addr =
+ devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
+ if (!fman->dts_params.base_addr) {
+ dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
+ goto fman_free;
+ }
+
+ return fman;
+
+fman_node_put:
+ of_node_put(fm_node);
+fman_free:
+ kfree(fman);
+ return NULL;
+}
+
+static int fman_probe(struct platform_device *of_dev)
+{
+ struct fman *fman;
+ struct device *dev;
+ int err;
+
+ dev = &of_dev->dev;
+
+ fman = read_dts_node(of_dev);
+ if (!fman)
+ return -EIO;
+
+ err = fman_config(fman);
+ if (err) {
+ dev_err(dev, "%s: FMan config failed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (fman_init(fman) != 0) {
+ dev_err(dev, "%s: FMan init failed\n", __func__);
+ return -EINVAL;
+ }
+
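+ /* no error IRQ was provided, so error events cannot be
+ * serviced; mask all error exceptions
+ */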
+ if (fman->dts_params.err_irq == 0) {
+ fman_set_exception(fman, FMAN_EX_DMA_BUS_ERROR, false);
+ fman_set_exception(fman, FMAN_EX_DMA_READ_ECC, false);
+ fman_set_exception(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC, false);
+ fman_set_exception(fman, FMAN_EX_DMA_FM_WRITE_ECC, false);
+ fman_set_exception(fman, FMAN_EX_DMA_SINGLE_PORT_ECC, false);
+ fman_set_exception(fman, FMAN_EX_FPM_STALL_ON_TASKS, false);
+ fman_set_exception(fman, FMAN_EX_FPM_SINGLE_ECC, false);
+ fman_set_exception(fman, FMAN_EX_FPM_DOUBLE_ECC, false);
+ fman_set_exception(fman, FMAN_EX_QMI_SINGLE_ECC, false);
+ fman_set_exception(fman, FMAN_EX_QMI_DOUBLE_ECC, false);
+ fman_set_exception(fman,
+ FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID, false);
+ fman_set_exception(fman, FMAN_EX_BMI_LIST_RAM_ECC, false);
+ fman_set_exception(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC,
+ false);
+ fman_set_exception(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC, false);
+ fman_set_exception(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC, false);
+ }
+
+ dev_set_drvdata(dev, fman);
+
+ fman->dev = dev;
+
+ dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id);
+
+ return 0;
+}
+
+static const struct of_device_id fman_match[] = {
+ { .compatible = "fsl,fman" },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, fman_match);
+
+static struct platform_driver fman_driver = {
+ .driver = {
+ .name = "fsl-fman",
+ .of_match_table = fman_match,
+ },
+ .probe = fman_probe,
+};
+
+builtin_platform_driver(fman_driver);
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
new file mode 100644
index 0000000..57aae8d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -0,0 +1,325 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FM_H
+#define __FM_H
+
+#include <linux/io.h>
+
+/* FM Frame descriptor macros */
+/* Frame queue Context Override */
+#define FM_FD_CMD_FCO 0x80000000
+#define FM_FD_CMD_RPD 0x40000000 /* Read Prepended Data */
+#define FM_FD_CMD_DTC 0x10000000 /* Do L4 Checksum */
+
+/* TX-Port: Unsupported Format */
+#define FM_FD_ERR_UNSUPPORTED_FORMAT 0x04000000
+/* TX Port: Length Error */
+#define FM_FD_ERR_LENGTH 0x02000000
+#define FM_FD_ERR_DMA 0x01000000 /* DMA Data error */
+
+/* IPR frame (not error) */
+#define FM_FD_IPR 0x00000001
+/* IPR non-consistent-sp */
+#define FM_FD_ERR_IPR_NCSP (0x00100000 | FM_FD_IPR)
+/* IPR error */
+#define FM_FD_ERR_IPR (0x00200000 | FM_FD_IPR)
+/* IPR timeout */
+#define FM_FD_ERR_IPR_TO (0x00300000 | FM_FD_IPR)
+/* IPR error mask, without the IPR frame indication bit */
+#define FM_FD_ERR_IPRE (FM_FD_ERR_IPR & ~FM_FD_IPR)
+
+/* Rx FIFO overflow, FCS error, code error, running disparity error
+ * (SGMII and TBI modes), FIFO parity error. PHY Sequence error,
+ * PHY error control character detected.
+ */
+#define FM_FD_ERR_PHYSICAL 0x00080000
+/* Frame too long OR Frame size exceeds max_length_frame */
+#define FM_FD_ERR_SIZE 0x00040000
+/* classification discard */
+#define FM_FD_ERR_CLS_DISCARD 0x00020000
+/* Extract Out of Frame */
+#define FM_FD_ERR_EXTRACTION 0x00008000
+/* No Scheme Selected */
+#define FM_FD_ERR_NO_SCHEME 0x00004000
+/* Keysize Overflow */
+#define FM_FD_ERR_KEYSIZE_OVERFLOW 0x00002000
+/* Frame color is red */
+#define FM_FD_ERR_COLOR_RED 0x00000800
+/* Frame color is yellow */
+#define FM_FD_ERR_COLOR_YELLOW 0x00000400
+/* Parser Time out Exceed */
+#define FM_FD_ERR_PRS_TIMEOUT 0x00000080
+/* Invalid Soft Parser instruction */
+#define FM_FD_ERR_PRS_ILL_INSTRUCT 0x00000040
+/* Header error was identified during parsing */
+#define FM_FD_ERR_PRS_HDR_ERR 0x00000020
+/* Frame parsed beyond the first 256 bytes */
+#define FM_FD_ERR_BLOCK_LIMIT_EXCEEDED 0x00000008
+
+/* non Frame-Manager error */
+#define FM_FD_RX_STATUS_ERR_NON_FM 0x00400000
+
+/* FMan driver defines */
+#define FMAN_BMI_FIFO_UNITS 0x100
+#define OFFSET_UNITS 16
+
+/* BMan defines */
+#define BM_MAX_NUM_OF_POOLS 64 /* Buffers pools */
+#define FMAN_PORT_MAX_EXT_POOLS_NUM 8 /* External BM pools per Rx port */
+
+struct fman; /* FMan data */
+
+/* Enum for defining port types */
+enum fman_port_type {
+ FMAN_PORT_TYPE_TX = 0, /* TX Port */
+ FMAN_PORT_TYPE_RX, /* RX Port */
+};
+
+struct fman_rev_info {
+ u8 major; /* Major revision */
+ u8 minor; /* Minor revision */
+};
+
+enum fman_exceptions {
+ FMAN_EX_DMA_BUS_ERROR = 0, /* DMA bus error. */
+ FMAN_EX_DMA_READ_ECC, /* Read Buffer ECC error */
+ FMAN_EX_DMA_SYSTEM_WRITE_ECC, /* Write Buffer ECC err on sys side */
+ FMAN_EX_DMA_FM_WRITE_ECC, /* Write Buffer ECC error on FM side */
+ FMAN_EX_DMA_SINGLE_PORT_ECC, /* Single Port ECC error on FM side */
+ FMAN_EX_FPM_STALL_ON_TASKS, /* Stall of tasks on FPM */
+ FMAN_EX_FPM_SINGLE_ECC, /* Single ECC on FPM. */
+ FMAN_EX_FPM_DOUBLE_ECC, /* Double ECC error on FPM ram access */
+ FMAN_EX_QMI_SINGLE_ECC, /* Single ECC on QMI. */
+ FMAN_EX_QMI_DOUBLE_ECC, /* Double bit ECC occurred on QMI */
+ FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID,/* DeQ from unknown port id */
+ FMAN_EX_BMI_LIST_RAM_ECC, /* Linked List RAM ECC error */
+ FMAN_EX_BMI_STORAGE_PROFILE_ECC,/* storage profile */
+ FMAN_EX_BMI_STATISTICS_RAM_ECC,/* Statistics RAM ECC Err Enable */
+ FMAN_EX_BMI_DISPATCH_RAM_ECC, /* Dispatch RAM ECC Error Enable */
+ FMAN_EX_IRAM_ECC, /* Double bit ECC occurred on IRAM */
+ FMAN_EX_MURAM_ECC /* Double bit ECC occurred on MURAM */
+};
+
+/* Parse results memory layout */
+struct fman_prs_result {
+ u8 lpid; /* Logical port id */
+ u8 shimr; /* Shim header result */
+ u16 l2r; /* Layer 2 result */
+ u16 l3r; /* Layer 3 result */
+ u8 l4r; /* Layer 4 result */
+ u8 cplan; /* Classification plan id */
+ u16 nxthdr; /* Next Header */
+ u16 cksum; /* Running-sum */
+ /* Flags&fragment-offset field of the last IP-header */
+ u16 flags_frag_off;
+ /* Routing type field of a IPV6 routing extension header */
+ u8 route_type;
+ /* Routing Extension Header Present; last bit is IP valid */
+ u8 rhp_ip_valid;
+ u8 shim_off[2]; /* Shim offset */
+ u8 ip_pid_off; /* IP PID (last IP-proto) offset */
+ u8 eth_off; /* ETH offset */
+ u8 llc_snap_off; /* LLC_SNAP offset */
+ u8 vlan_off[2]; /* VLAN offset */
+ u8 etype_off; /* ETYPE offset */
+ u8 pppoe_off; /* PPP offset */
+ u8 mpls_off[2]; /* MPLS offset */
+ u8 ip_off[2]; /* IP offset */
+ u8 gre_off; /* GRE offset */
+ u8 l4_off; /* Layer 4 offset */
+ u8 nxthdr_off; /* Parser end point */
+};
+
+/* A structure for defining buffer prefix area content. */
+struct fman_buffer_prefix_content {
+ /* Number of bytes to be left at the beginning of the external
+ * buffer; Note that the private-area will start from the base
+ * of the buffer address.
+ */
+ u16 priv_data_size;
+ /* true to pass the parse result to/from the FM;
+ * User may use FM_PORT_GetBufferPrsResult() in
+ * order to get the parser-result from a buffer.
+ */
+ bool pass_prs_result;
+ /* true to pass the timeStamp to/from the FM User */
+ bool pass_time_stamp;
+ /* true to pass the KG hash result to/from the FM User may
+ * use FM_PORT_GetBufferHashResult() in order to get the
+ * parser-result from a buffer.
+ */
+ bool pass_hash_result;
+ /* Alignment of the data in the external buffer, in bytes */
+ u16 data_align;
+};
+
+/* A structure of information about each of the external
+ * buffer pools used by a port or storage-profile.
+ */
+struct fman_ext_pool_params {
+ u8 id; /* External buffer pool id */
+ u16 size; /* External buffer pool buffer size */
+};
+
+/* A structure for informing the driver about the external
+ * buffer pools allocated in the BM and used by a port or a
+ * storage-profile.
+ */
+struct fman_ext_pools {
+ u8 num_of_pools_used; /* Number of pools used by this port */
+ struct fman_ext_pool_params ext_buf_pool[FMAN_PORT_MAX_EXT_POOLS_NUM];
+ /* Parameters for each used pool */
+};
+
+/* A structure for defining BM pool depletion criteria */
+struct fman_buf_pool_depletion {
+ /* select mode in which pause frames will be sent after a
+ * number of pools (all together!) are depleted
+ */
+ bool pools_grp_mode_enable;
+ /* the number of depleted pools that will invoke pause
+ * frames transmission.
+ */
+ u8 num_of_pools;
+ /* For each pool, true if it should be considered for
+ * depletion (Note - this pool must be used by this port!).
+ */
+ bool pools_to_consider[BM_MAX_NUM_OF_POOLS];
+ /* select mode in which pause frames will be sent
+ * after a single-pool is depleted;
+ */
+ bool single_pool_mode_enable;
+ /* For each pool, true if it should be considered
+ * for depletion (Note - this pool must be used by this port!)
+ */
+ bool pools_to_consider_for_single_mode[BM_MAX_NUM_OF_POOLS];
+};
+
+/* Enum for inter-module interrupts registration */
+enum fman_event_modules {
+ FMAN_MOD_MAC = 0, /* MAC event */
+ FMAN_MOD_FMAN_CTRL, /* FMAN Controller */
+ FMAN_MOD_DUMMY_LAST
+};
+
+/* Enum for interrupts types */
+enum fman_intr_type {
+ FMAN_INTR_TYPE_ERR,
+ FMAN_INTR_TYPE_NORMAL
+};
+
+/* Enum for inter-module interrupts registration */
+enum fman_inter_module_event {
+ FMAN_EV_ERR_MAC0 = 0, /* MAC 0 error event */
+ FMAN_EV_ERR_MAC1, /* MAC 1 error event */
+ FMAN_EV_ERR_MAC2, /* MAC 2 error event */
+ FMAN_EV_ERR_MAC3, /* MAC 3 error event */
+ FMAN_EV_ERR_MAC4, /* MAC 4 error event */
+ FMAN_EV_ERR_MAC5, /* MAC 5 error event */
+ FMAN_EV_ERR_MAC6, /* MAC 6 error event */
+ FMAN_EV_ERR_MAC7, /* MAC 7 error event */
+ FMAN_EV_ERR_MAC8, /* MAC 8 error event */
+ FMAN_EV_ERR_MAC9, /* MAC 9 error event */
+ FMAN_EV_MAC0, /* MAC 0 event (Magic packet detection) */
+ FMAN_EV_MAC1, /* MAC 1 event (Magic packet detection) */
+ FMAN_EV_MAC2, /* MAC 2 (Magic packet detection) */
+ FMAN_EV_MAC3, /* MAC 3 (Magic packet detection) */
+ FMAN_EV_MAC4, /* MAC 4 (Magic packet detection) */
+ FMAN_EV_MAC5, /* MAC 5 (Magic packet detection) */
+ FMAN_EV_MAC6, /* MAC 6 (Magic packet detection) */
+ FMAN_EV_MAC7, /* MAC 7 (Magic packet detection) */
+ FMAN_EV_MAC8, /* MAC 8 event (Magic packet detection) */
+ FMAN_EV_MAC9, /* MAC 9 event (Magic packet detection) */
+ FMAN_EV_FMAN_CTRL_0, /* Fman controller event 0 */
+ FMAN_EV_FMAN_CTRL_1, /* Fman controller event 1 */
+ FMAN_EV_FMAN_CTRL_2, /* Fman controller event 2 */
+ FMAN_EV_FMAN_CTRL_3, /* Fman controller event 3 */
+ FMAN_EV_CNT
+};
+
+struct fman_intr_src {
+ void (*isr_cb)(void *src_arg);
+ void *src_handle;
+};
+
+/* Structure for port-FM communication during fman_port_init. */
+struct fman_port_init_params {
+ u8 port_id; /* port Id */
+ enum fman_port_type port_type; /* Port type */
+ u16 port_speed; /* Port speed */
+ u16 liodn_offset; /* Port's requested resource */
+ u8 num_of_tasks; /* Port's requested resource */
+ u8 num_of_extra_tasks; /* Port's requested resource */
+ u8 num_of_open_dmas; /* Port's requested resource */
+ u8 num_of_extra_open_dmas; /* Port's requested resource */
+ u32 size_of_fifo; /* Port's requested resource */
+ u32 extra_size_of_fifo; /* Port's requested resource */
+ u8 deq_pipeline_depth; /* Port's requested resource */
+ u16 max_frame_length; /* Port's max frame length. */
+ u16 liodn_base;
+ /* LIODN base for this port, to be used together with LIODN offset. */
+};
+
+void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info);
+
+void fman_register_intr(struct fman *fman, enum fman_event_modules mod,
+ u8 mod_id, enum fman_intr_type intr_type,
+ void (*f_isr)(void *h_src_arg), void *h_src_arg);
+
+void fman_unregister_intr(struct fman *fman, enum fman_event_modules mod,
+ u8 mod_id, enum fman_intr_type intr_type);
+
+int fman_set_port_params(struct fman *fman,
+ struct fman_port_init_params *port_params);
+
+int fman_reset_mac(struct fman *fman, u8 mac_id);
+
+u16 fman_get_clock_freq(struct fman *fman);
+
+u32 fman_get_bmi_max_fifo_size(struct fman *fman);
+
+int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl);
+
+u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id);
+
+struct resource *fman_get_mem_region(struct fman *fman);
+
+u16 fman_get_max_frm(void);
+
+int fman_get_rx_extra_headroom(void);
+
+struct fman *fman_bind(struct device *dev);
+
+#endif /* __FM_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
new file mode 100644
index 0000000..d78e2ba
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -0,0 +1,1453 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "fman_dtsec.h"
+#include "fman.h"
+
+#include <linux/slab.h>
+#include <linux/bitrev.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/phy.h>
+#include <linux/crc32.h>
+#include <linux/of_mdio.h>
+#include <linux/mii.h>
+
+/* TBI register addresses */
+#define MII_TBICON 0x11
+
+/* TBICON register bit fields */
+#define TBICON_SOFT_RESET 0x8000 /* Soft reset */
+#define TBICON_DISABLE_RX_DIS 0x2000 /* Disable receive disparity */
+#define TBICON_DISABLE_TX_DIS 0x1000 /* Disable transmit disparity */
+#define TBICON_AN_SENSE 0x0100 /* Auto-negotiation sense enable */
+#define TBICON_CLK_SELECT 0x0020 /* Clock select */
+#define TBICON_MI_MODE 0x0010 /* GMII mode (TBI if not set) */
+
+#define TBIANA_SGMII 0x4001
+#define TBIANA_1000X 0x01a0
+
+/* Interrupt Mask Register (IMASK) */
+#define DTSEC_IMASK_BREN 0x80000000
+#define DTSEC_IMASK_RXCEN 0x40000000
+#define DTSEC_IMASK_MSROEN 0x04000000
+#define DTSEC_IMASK_GTSCEN 0x02000000
+#define DTSEC_IMASK_BTEN 0x01000000
+#define DTSEC_IMASK_TXCEN 0x00800000
+#define DTSEC_IMASK_TXEEN 0x00400000
+#define DTSEC_IMASK_LCEN 0x00040000
+#define DTSEC_IMASK_CRLEN 0x00020000
+#define DTSEC_IMASK_XFUNEN 0x00010000
+#define DTSEC_IMASK_ABRTEN 0x00008000
+#define DTSEC_IMASK_IFERREN 0x00004000
+#define DTSEC_IMASK_MAGEN 0x00000800
+#define DTSEC_IMASK_MMRDEN 0x00000400
+#define DTSEC_IMASK_MMWREN 0x00000200
+#define DTSEC_IMASK_GRSCEN 0x00000100
+#define DTSEC_IMASK_TDPEEN 0x00000002
+#define DTSEC_IMASK_RDPEEN 0x00000001
+
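+/* Default set of enabled interrupt events; dtsec_config() starts from this
+ * mask and dtsec_set_exception() adjusts individual sources afterwards.
+ */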
+#define DTSEC_EVENTS_MASK \
+ ((u32)(DTSEC_IMASK_BREN | \
+ DTSEC_IMASK_RXCEN | \
+ DTSEC_IMASK_BTEN | \
+ DTSEC_IMASK_TXCEN | \
+ DTSEC_IMASK_TXEEN | \
+ DTSEC_IMASK_ABRTEN | \
+ DTSEC_IMASK_LCEN | \
+ DTSEC_IMASK_CRLEN | \
+ DTSEC_IMASK_XFUNEN | \
+ DTSEC_IMASK_IFERREN | \
+ DTSEC_IMASK_MAGEN | \
+ DTSEC_IMASK_TDPEEN | \
+ DTSEC_IMASK_RDPEEN))
+
+/* dtsec timestamp event bits */
+#define TMR_PEMASK_TSREEN 0x00010000
+#define TMR_PEVENT_TSRE 0x00010000
+
+/* Group address bit indication */
+#define MAC_GROUP_ADDRESS 0x0000010000000000ULL
+
+/* Defaults */
+#define DEFAULT_HALFDUP_RETRANSMIT 0xf
+#define DEFAULT_HALFDUP_COLL_WINDOW 0x37
+#define DEFAULT_TX_PAUSE_TIME 0xf000
+#define DEFAULT_RX_PREPEND 0
+#define DEFAULT_PREAMBLE_LEN 7
+#define DEFAULT_TX_PAUSE_TIME_EXTD 0
+#define DEFAULT_NON_BACK_TO_BACK_IPG1 0x40
+#define DEFAULT_NON_BACK_TO_BACK_IPG2 0x60
+#define DEFAULT_MIN_IFG_ENFORCEMENT 0x50
+#define DEFAULT_BACK_TO_BACK_IPG 0x60
+#define DEFAULT_MAXIMUM_FRAME 0x600
+
+/* register related defines (bits, field offsets..) */
+#define DTSEC_ID2_INT_REDUCED_OFF 0x00010000
+
+#define DTSEC_ECNTRL_GMIIM 0x00000040
+#define DTSEC_ECNTRL_TBIM 0x00000020
+#define DTSEC_ECNTRL_SGMIIM 0x00000002
+#define DTSEC_ECNTRL_RPM 0x00000010
+#define DTSEC_ECNTRL_R100M 0x00000008
+#define DTSEC_ECNTRL_QSGMIIM 0x00000001
+
+#define DTSEC_TCTRL_GTS 0x00000020
+
+#define RCTRL_PAL_MASK 0x001f0000
+#define RCTRL_PAL_SHIFT 16
+#define RCTRL_GHTX 0x00000400
+#define RCTRL_GRS 0x00000020
+#define RCTRL_MPROM 0x00000008
+#define RCTRL_RSF 0x00000004
+#define RCTRL_UPROM 0x00000001
+
+#define MACCFG1_SOFT_RESET 0x80000000
+#define MACCFG1_RX_FLOW 0x00000020
+#define MACCFG1_TX_FLOW 0x00000010
+#define MACCFG1_TX_EN 0x00000001
+#define MACCFG1_RX_EN 0x00000004
+
+#define MACCFG2_NIBBLE_MODE 0x00000100
+#define MACCFG2_BYTE_MODE 0x00000200
+#define MACCFG2_PAD_CRC_EN 0x00000004
+#define MACCFG2_FULL_DUPLEX 0x00000001
+#define MACCFG2_PREAMBLE_LENGTH_MASK 0x0000f000
+#define MACCFG2_PREAMBLE_LENGTH_SHIFT 12
+
+#define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT 24
+#define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT 16
+#define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT 8
+
+#define IPGIFG_NON_BACK_TO_BACK_IPG_1 0x7F000000
+#define IPGIFG_NON_BACK_TO_BACK_IPG_2 0x007F0000
+#define IPGIFG_MIN_IFG_ENFORCEMENT 0x0000FF00
+#define IPGIFG_BACK_TO_BACK_IPG 0x0000007F
+
+#define HAFDUP_EXCESS_DEFER 0x00010000
+#define HAFDUP_COLLISION_WINDOW 0x000003ff
+#define HAFDUP_RETRANSMISSION_MAX_SHIFT 12
+#define HAFDUP_RETRANSMISSION_MAX 0x0000f000
+
+#define NUM_OF_HASH_REGS 8 /* Number of hash table registers */
+
+#define PTV_PTE_MASK 0xffff0000
+#define PTV_PT_MASK 0x0000ffff
+#define PTV_PTE_SHIFT 16
+
+#define MAX_PACKET_ALIGNMENT 31
+#define MAX_INTER_PACKET_GAP 0x7f
+#define MAX_RETRANSMISSION 0x0f
+#define MAX_COLLISION_WINDOW 0x03ff
+
+/* Hash table size (32 bits*8 regs) */
+#define DTSEC_HASH_TABLE_SIZE 256
+/* Extended Hash table size (32 bits*16 regs) */
+#define EXTENDED_HASH_TABLE_SIZE 512
+
+/* dTSEC Memory Map registers */
+struct dtsec_regs {
+ /* dTSEC General Control and Status Registers */
+ u32 tsec_id; /* 0x000 ETSEC_ID register */
+ u32 tsec_id2; /* 0x004 ETSEC_ID2 register */
+ u32 ievent; /* 0x008 Interrupt event register */
+ u32 imask; /* 0x00C Interrupt mask register */
+ u32 reserved0010[1];
+ u32 ecntrl; /* 0x014 E control register */
+ u32 ptv; /* 0x018 Pause time value register */
+ u32 tbipa; /* 0x01C TBI PHY address register */
+ u32 tmr_ctrl; /* 0x020 Time-stamp Control register */
+ u32 tmr_pevent; /* 0x024 Time-stamp event register */
+ u32 tmr_pemask; /* 0x028 Timer event mask register */
+ u32 reserved002c[5];
+ u32 tctrl; /* 0x040 Transmit control register */
+ u32 reserved0044[3];
+ u32 rctrl; /* 0x050 Receive control register */
+ u32 reserved0054[11];
+ u32 igaddr[8]; /* 0x080-0x09C Individual/group address */
+ u32 gaddr[8]; /* 0x0A0-0x0BC Group address registers 0-7 */
+ u32 reserved00c0[16];
+ u32 maccfg1; /* 0x100 MAC configuration #1 */
+ u32 maccfg2; /* 0x104 MAC configuration #2 */
+ u32 ipgifg; /* 0x108 IPG/IFG */
+ u32 hafdup; /* 0x10C Half-duplex */
+ u32 maxfrm; /* 0x110 Maximum frame */
+ u32 reserved0114[10];
+ u32 ifstat; /* 0x13C Interface status */
+ u32 macstnaddr1; /* 0x140 Station Address,part 1 */
+ u32 macstnaddr2; /* 0x144 Station Address,part 2 */
+ struct {
+ u32 exact_match1; /* octets 1-4 */
+ u32 exact_match2; /* octets 5-6 */
+ } macaddr[15]; /* 0x148-0x1BC mac exact match addresses 1-15 */
+ u32 reserved01c0[16];
+ u32 tr64; /* 0x200 Tx and Rx 64 byte frame counter */
+ u32 tr127; /* 0x204 Tx and Rx 65 to 127 byte frame counter */
+ u32 tr255; /* 0x208 Tx and Rx 128 to 255 byte frame counter */
+ u32 tr511; /* 0x20C Tx and Rx 256 to 511 byte frame counter */
+ u32 tr1k; /* 0x210 Tx and Rx 512 to 1023 byte frame counter */
+ u32 trmax; /* 0x214 Tx and Rx 1024 to 1518 byte frame counter */
+ u32 trmgv;
+ /* 0x218 Tx and Rx 1519 to 1522 byte good VLAN frame count */
+ u32 rbyt; /* 0x21C receive byte counter */
+ u32 rpkt; /* 0x220 receive packet counter */
+ u32 rfcs; /* 0x224 receive FCS error counter */
+ u32 rmca; /* 0x228 RMCA Rx multicast packet counter */
+ u32 rbca; /* 0x22C Rx broadcast packet counter */
+ u32 rxcf; /* 0x230 Rx control frame packet counter */
+ u32 rxpf; /* 0x234 Rx pause frame packet counter */
+ u32 rxuo; /* 0x238 Rx unknown OP code counter */
+ u32 raln; /* 0x23C Rx alignment error counter */
+ u32 rflr; /* 0x240 Rx frame length error counter */
+ u32 rcde; /* 0x244 Rx code error counter */
+ u32 rcse; /* 0x248 Rx carrier sense error counter */
+ u32 rund; /* 0x24C Rx undersize packet counter */
+ u32 rovr; /* 0x250 Rx oversize packet counter */
+ u32 rfrg; /* 0x254 Rx fragments counter */
+ u32 rjbr; /* 0x258 Rx jabber counter */
+ u32 rdrp; /* 0x25C Rx drop */
+ u32 tbyt; /* 0x260 Tx byte counter */
+ u32 tpkt; /* 0x264 Tx packet counter */
+ u32 tmca; /* 0x268 Tx multicast packet counter */
+ u32 tbca; /* 0x26C Tx broadcast packet counter */
+ u32 txpf; /* 0x270 Tx pause control frame counter */
+ u32 tdfr; /* 0x274 Tx deferral packet counter */
+ u32 tedf; /* 0x278 Tx excessive deferral packet counter */
+ u32 tscl; /* 0x27C Tx single collision packet counter */
+ u32 tmcl; /* 0x280 Tx multiple collision packet counter */
+ u32 tlcl; /* 0x284 Tx late collision packet counter */
+ u32 txcl; /* 0x288 Tx excessive collision packet counter */
+ u32 tncl; /* 0x28C Tx total collision counter */
+ u32 reserved0290[1];
+ u32 tdrp; /* 0x294 Tx drop frame counter */
+ u32 tjbr; /* 0x298 Tx jabber frame counter */
+ u32 tfcs; /* 0x29C Tx FCS error counter */
+ u32 txcf; /* 0x2A0 Tx control frame counter */
+ u32 tovr; /* 0x2A4 Tx oversize frame counter */
+ u32 tund; /* 0x2A8 Tx undersize frame counter */
+ u32 tfrg; /* 0x2AC Tx fragments frame counter */
+ u32 car1; /* 0x2B0 carry register one register* */
+ u32 car2; /* 0x2B4 carry register two register* */
+ u32 cam1; /* 0x2B8 carry register one mask register */
+ u32 cam2; /* 0x2BC carry register two mask register */
+ u32 reserved02c0[848];
+};
+
+/* struct dtsec_cfg - dTSEC configuration
+ * halfdup_retransmit:
+ * Number of retransmission attempts following a collision.
+ * If this is exceeded dTSEC aborts transmission due to excessive collisions.
+ * The standard specifies the attempt limit to be 15.
+ * halfdup_coll_window:
+ * The number of bytes of the frame during which collisions may occur.
+ * The default value of 55 corresponds to the frame byte at the end of the
+ * standard 512-bit slot time window. If collisions are detected after this
+ * byte, the late collision event is asserted and transmission of current
+ * frame is aborted.
+ * tx_pad_crc:
+ * Pad and append CRC. If set, the MAC pads all transmitted short frames and
+ * appends a CRC to every frame regardless of padding requirement.
+ * tx_pause_time:
+ * Transmit pause time value. This pause value is used as part of the pause
+ * frame to be sent when a transmit pause frame is initiated.
+ * If set to 0 this disables transmission of pause frames.
+ * preamble_len:
+ * Length, in bytes, of the preamble field preceding each Ethernet
+ * start-of-frame delimiter byte. The default value of 0x7 should be used in
+ * order to guarantee reliable operation with IEEE 802.3 compliant hardware.
+ * rx_prepend:
+ * Packet alignment padding length. The specified number of bytes (1-31)
+ * of zero padding are inserted before the start of each received frame.
+ * For Ethernet, where optional preamble extraction is enabled, the padding
+ * appears before the preamble, otherwise the padding precedes the
+ * layer 2 header.
+ *
+ * This structure contains basic dTSEC configuration and must be passed to
+ * init() function. A default set of configuration values can be
+ * obtained by calling set_dflts().
+ */
+struct dtsec_cfg {
+ u16 halfdup_retransmit;
+ u16 halfdup_coll_window;
+ bool tx_pad_crc;
+ u16 tx_pause_time;
+ bool ptp_tsu_en;
+ bool ptp_exception_en;
+ u32 preamble_len;
+ u32 rx_prepend;
+ u16 tx_pause_time_extd;
+ u16 maximum_frame;
+ u32 non_back_to_back_ipg1;
+ u32 non_back_to_back_ipg2;
+ u32 min_ifg_enforcement;
+ u32 back_to_back_ipg;
+};
+
+struct fman_mac {
+ /* pointer to dTSEC memory mapped registers */
+ struct dtsec_regs __iomem *regs;
+ /* MAC address of device */
+ u64 addr;
+ /* Ethernet physical interface */
+ phy_interface_t phy_if;
+ u16 max_speed;
+ void *dev_id; /* device cookie used by the exception cbs */
+ fman_mac_exception_cb *exception_cb;
+ fman_mac_exception_cb *event_cb;
+ /* Number of individual addresses in registers for this station */
+ u8 num_of_ind_addr_in_regs;
+ /* pointer to driver's global address hash table */
+ struct eth_hash_t *multicast_addr_hash;
+ /* pointer to driver's individual address hash table */
+ struct eth_hash_t *unicast_addr_hash;
+ u8 mac_id;
+ u32 exceptions;
+ bool ptp_tsu_enabled;
+ bool en_tsu_err_exception;
+ struct dtsec_cfg *dtsec_drv_param;
+ void *fm;
+ struct fman_rev_info fm_rev_info;
+ bool basex_if;
+ struct phy_device *tbiphy;
+};
+
+static void set_dflts(struct dtsec_cfg *cfg)
+{
+ cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
+ cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
+ cfg->tx_pad_crc = true;
+ cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
+ cfg->rx_prepend = DEFAULT_RX_PREPEND;
+ cfg->ptp_tsu_en = true;
+ cfg->ptp_exception_en = true;
+ cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
+ cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
+ cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
+ cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
+ cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
+ cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
+ cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
+}
+
+static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
+ phy_interface_t iface, u16 iface_speed, u8 *macaddr,
+ u32 exception_mask, u8 tbi_addr)
+{
+ bool is_rgmii, is_sgmii, is_qsgmii;
+ int i;
+ u32 tmp;
+
+ /* Soft reset */
+ iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
+ iowrite32be(0, &regs->maccfg1);
+
+ /* dtsec_id2 */
+ tmp = ioread32be(&regs->tsec_id2);
+
+ /* check RGMII support */
+ if (iface == PHY_INTERFACE_MODE_RGMII ||
+ iface == PHY_INTERFACE_MODE_RMII)
+ if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
+ return -EINVAL;
+
+ if (iface == PHY_INTERFACE_MODE_SGMII ||
+ iface == PHY_INTERFACE_MODE_MII)
+ if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
+ return -EINVAL;
+
+ is_rgmii = iface == PHY_INTERFACE_MODE_RGMII;
+ is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
+ is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;
+
+ tmp = 0;
+ if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
+ tmp |= DTSEC_ECNTRL_GMIIM;
+ if (is_sgmii)
+ tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
+ if (is_qsgmii)
+ tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
+ DTSEC_ECNTRL_QSGMIIM);
+ if (is_rgmii)
+ tmp |= DTSEC_ECNTRL_RPM;
+ if (iface_speed == SPEED_100)
+ tmp |= DTSEC_ECNTRL_R100M;
+
+ iowrite32be(tmp, &regs->ecntrl);
+
+ tmp = 0;
+
+ if (cfg->tx_pause_time)
+ tmp |= cfg->tx_pause_time;
+ if (cfg->tx_pause_time_extd)
+ tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT;
+ iowrite32be(tmp, &regs->ptv);
+
+ tmp = 0;
+ tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
+ /* Accept short frames */
+ tmp |= RCTRL_RSF;
+
+ iowrite32be(tmp, &regs->rctrl);
+
+ /* Assign a Phy Address to the TBI (TBIPA).
+ * Done also in cases where TBI is not selected to avoid conflict with
+ * the external PHY's Physical address
+ */
+ iowrite32be(tbi_addr, &regs->tbipa);
+
+ iowrite32be(0, &regs->tmr_ctrl);
+
+ if (cfg->ptp_tsu_en) {
+ tmp = 0;
+ tmp |= TMR_PEVENT_TSRE;
+ iowrite32be(tmp, &regs->tmr_pevent);
+
+ if (cfg->ptp_exception_en) {
+ tmp = 0;
+ tmp |= TMR_PEMASK_TSREEN;
+ iowrite32be(tmp, &regs->tmr_pemask);
+ }
+ }
+
+ tmp = 0;
+ tmp |= MACCFG1_RX_FLOW;
+ tmp |= MACCFG1_TX_FLOW;
+ iowrite32be(tmp, &regs->maccfg1);
+
+ tmp = 0;
+
+ if (iface_speed < SPEED_1000)
+ tmp |= MACCFG2_NIBBLE_MODE;
+ else if (iface_speed == SPEED_1000)
+ tmp |= MACCFG2_BYTE_MODE;
+
+ tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
+ MACCFG2_PREAMBLE_LENGTH_MASK;
+ if (cfg->tx_pad_crc)
+ tmp |= MACCFG2_PAD_CRC_EN;
+ /* Full Duplex */
+ tmp |= MACCFG2_FULL_DUPLEX;
+ iowrite32be(tmp, &regs->maccfg2);
+
+ tmp = (((cfg->non_back_to_back_ipg1 <<
+ IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
+ & IPGIFG_NON_BACK_TO_BACK_IPG_1)
+ | ((cfg->non_back_to_back_ipg2 <<
+ IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
+ & IPGIFG_NON_BACK_TO_BACK_IPG_2)
+ | ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
+ & IPGIFG_MIN_IFG_ENFORCEMENT)
+ | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
+ iowrite32be(tmp, &regs->ipgifg);
+
+ tmp = 0;
+ tmp |= HAFDUP_EXCESS_DEFER;
+ tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
+ & HAFDUP_RETRANSMISSION_MAX);
+ tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
+
+ iowrite32be(tmp, &regs->hafdup);
+
+ /* Initialize Maximum frame length */
+ iowrite32be(cfg->maximum_frame, &regs->maxfrm);
+
+ iowrite32be(0xffffffff, &regs->cam1);
+ iowrite32be(0xffffffff, &regs->cam2);
+
+ iowrite32be(exception_mask, &regs->imask);
+
+ iowrite32be(0xffffffff, &regs->ievent);
+
+ tmp = (u32)((macaddr[5] << 24) |
+ (macaddr[4] << 16) | (macaddr[3] << 8) | macaddr[2]);
+ iowrite32be(tmp, &regs->macstnaddr1);
+
+ tmp = (u32)((macaddr[1] << 24) | (macaddr[0] << 16));
+ iowrite32be(tmp, &regs->macstnaddr2);
+
+ /* HASH */
+ for (i = 0; i < NUM_OF_HASH_REGS; i++) {
+ /* Initialize IADDRx */
+ iowrite32be(0, &regs->igaddr[i]);
+ /* Initialize GADDRx */
+ iowrite32be(0, &regs->gaddr[i]);
+ }
+
+ return 0;
+}
+
+static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
+{
+ u32 tmp;
+
+ tmp = (u32)((adr[5] << 24) |
+ (adr[4] << 16) | (adr[3] << 8) | adr[2]);
+ iowrite32be(tmp, &regs->macstnaddr1);
+
+ tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
+ iowrite32be(tmp, &regs->macstnaddr2);
+}
+
+static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
+ bool enable)
+{
+ int reg_idx = (bucket >> 5) & 0xf;
+ int bit_idx = bucket & 0x1f;
+ u32 bit_mask = 0x80000000 >> bit_idx;
+ u32 __iomem *reg;
+
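+ /* Buckets 0-255 map onto igaddr[0..7] and buckets 256-511 onto
+ * gaddr[0..7], 32 buckets per 32-bit register, most significant
+ * bit first.
+ */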
+ if (reg_idx > 7)
+ reg = &regs->gaddr[reg_idx - 8];
+ else
+ reg = &regs->igaddr[reg_idx];
+
+ if (enable)
+ iowrite32be(ioread32be(reg) | bit_mask, reg);
+ else
+ iowrite32be(ioread32be(reg) & (~bit_mask), reg);
+}
+
+static int check_init_parameters(struct fman_mac *dtsec)
+{
+ if (dtsec->max_speed >= SPEED_10000) {
+ pr_err("1G MAC driver supports 1G or lower speeds\n");
+ return -EINVAL;
+ }
+ if (dtsec->addr == 0) {
+ pr_err("Ethernet MAC Must have a valid MAC Address\n");
+ return -EINVAL;
+ }
+ if ((dtsec->dtsec_drv_param)->rx_prepend >
+ MAX_PACKET_ALIGNMENT) {
+ pr_err("packetAlignmentPadding can't be > than %d\n",
+ MAX_PACKET_ALIGNMENT);
+ return -EINVAL;
+ }
+ if (((dtsec->dtsec_drv_param)->non_back_to_back_ipg1 >
+ MAX_INTER_PACKET_GAP) ||
+ ((dtsec->dtsec_drv_param)->non_back_to_back_ipg2 >
+ MAX_INTER_PACKET_GAP) ||
+ ((dtsec->dtsec_drv_param)->back_to_back_ipg >
+ MAX_INTER_PACKET_GAP)) {
+ pr_err("Inter packet gap can't be greater than %d\n",
+ MAX_INTER_PACKET_GAP);
+ return -EINVAL;
+ }
+ if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
+ MAX_RETRANSMISSION) {
+ pr_err("maxRetransmission can't be greater than %d\n",
+ MAX_RETRANSMISSION);
+ return -EINVAL;
+ }
+ if ((dtsec->dtsec_drv_param)->halfdup_coll_window >
+ MAX_COLLISION_WINDOW) {
+ pr_err("collisionWindow can't be greater than %d\n",
+ MAX_COLLISION_WINDOW);
+ return -EINVAL;
+ }
+ if (!dtsec->exception_cb) {
+ pr_err("uninitialized exception_cb\n");
+ return -EINVAL;
+ }
+ if (!dtsec->event_cb) {
+ pr_err("uninitialized event_cb\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_exception_flag(enum fman_mac_exceptions exception)
+{
+ u32 bit_mask;
+
+ switch (exception) {
+ case FM_MAC_EX_1G_BAB_RX:
+ bit_mask = DTSEC_IMASK_BREN;
+ break;
+ case FM_MAC_EX_1G_RX_CTL:
+ bit_mask = DTSEC_IMASK_RXCEN;
+ break;
+ case FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET:
+ bit_mask = DTSEC_IMASK_GTSCEN;
+ break;
+ case FM_MAC_EX_1G_BAB_TX:
+ bit_mask = DTSEC_IMASK_BTEN;
+ break;
+ case FM_MAC_EX_1G_TX_CTL:
+ bit_mask = DTSEC_IMASK_TXCEN;
+ break;
+ case FM_MAC_EX_1G_TX_ERR:
+ bit_mask = DTSEC_IMASK_TXEEN;
+ break;
+ case FM_MAC_EX_1G_LATE_COL:
+ bit_mask = DTSEC_IMASK_LCEN;
+ break;
+ case FM_MAC_EX_1G_COL_RET_LMT:
+ bit_mask = DTSEC_IMASK_CRLEN;
+ break;
+ case FM_MAC_EX_1G_TX_FIFO_UNDRN:
+ bit_mask = DTSEC_IMASK_XFUNEN;
+ break;
+ case FM_MAC_EX_1G_MAG_PCKT:
+ bit_mask = DTSEC_IMASK_MAGEN;
+ break;
+ case FM_MAC_EX_1G_MII_MNG_RD_COMPLET:
+ bit_mask = DTSEC_IMASK_MMRDEN;
+ break;
+ case FM_MAC_EX_1G_MII_MNG_WR_COMPLET:
+ bit_mask = DTSEC_IMASK_MMWREN;
+ break;
+ case FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET:
+ bit_mask = DTSEC_IMASK_GRSCEN;
+ break;
+ case FM_MAC_EX_1G_DATA_ERR:
+ bit_mask = DTSEC_IMASK_TDPEEN;
+ break;
+ case FM_MAC_EX_1G_RX_MIB_CNT_OVFL:
+ bit_mask = DTSEC_IMASK_MSROEN;
+ break;
+ default:
+ bit_mask = 0;
+ break;
+ }
+
+ return bit_mask;
+}
+
+static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
+{
+ /* dtsec_init() frees the driver parameter structure when it
+ * completes, so a NULL pointer here means initialization is done.
+ */
+ return !dtsec_drv_params;
+}
+
+static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return 0;
+
+ return (u16)ioread32be(&regs->maxfrm);
+}
+
+static void dtsec_isr(void *handle)
+{
+ struct fman_mac *dtsec = (struct fman_mac *)handle;
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 event;
+
+ /* do not handle MDIO events */
+ event = ioread32be(&regs->ievent) &
+ (u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN));
+
+ event &= ioread32be(&regs->imask);
+
+ iowrite32be(event, &regs->ievent);
+
+ if (event & DTSEC_IMASK_BREN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX);
+ if (event & DTSEC_IMASK_RXCEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL);
+ if (event & DTSEC_IMASK_GTSCEN)
+ dtsec->exception_cb(dtsec->dev_id,
+ FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
+ if (event & DTSEC_IMASK_BTEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX);
+ if (event & DTSEC_IMASK_TXCEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL);
+ if (event & DTSEC_IMASK_TXEEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR);
+ if (event & DTSEC_IMASK_LCEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL);
+ if (event & DTSEC_IMASK_CRLEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
+ if (event & DTSEC_IMASK_XFUNEN) {
+ /* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
+ if (dtsec->fm_rev_info.major == 2) {
+ u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
+ /* a. Write 0x00E0_0C00 to DTSEC_ID
+ * This is a read only register
+ * b. Read and save the value of TPKT
+ */
+ tpkt1 = ioread32be(&regs->tpkt);
+
+ /* c. Read the register at dTSEC address offset 0x32C */
+ tmp_reg1 = ioread32be(&regs->reserved02c0[27]);
+
+ /* d. Compare bits [9:15] to bits [25:31] of the
+ * register at address offset 0x32C.
+ */
+ if (((tmp_reg1 & 0x007F0000) >> 16) !=
+ (tmp_reg1 & 0x0000007F)) {
+ /* If they are not equal, save the value of
+ * this register and wait for at least
+ * MAXFRM*16 ns
+ */
+ u32 delay_us = max_t(u32,
+ dtsec_get_max_frame_length(dtsec) *
+ 16 / 1000, 1);
+
+ usleep_range(delay_us, delay_us + 1);
+ }
+
+ /* e. Read and save TPKT again and read the register
+ * at dTSEC address offset 0x32C again
+ */
+ tpkt2 = ioread32be(&regs->tpkt);
+ tmp_reg2 = ioread32be(&regs->reserved02c0[27]);
+
+ /* f. Compare the value of TPKT saved in step b to
+ * value read in step e. Also compare bits [9:15] of
+ * the register at offset 0x32C saved in step d to the
+ * value of bits [9:15] saved in step e. If the two
+ * registers values are unchanged, then the transmit
+ * portion of the dTSEC controller is locked up and
+ * the user should proceed to the recover sequence.
+ */
+ if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) ==
+ (tmp_reg2 & 0x007F0000))) {
+ /* recover sequence */
+
+ /* a.Write a 1 to RCTRL[GRS] */
+
+ iowrite32be(ioread32be(&regs->rctrl) |
+ RCTRL_GRS, &regs->rctrl);
+
+ /* b.Wait until IEVENT[GRSC]=1, or at least
+ * 100 us has elapsed.
+ */
+ for (i = 0; i < 100; i++) {
+ if (ioread32be(&regs->ievent) &
+ DTSEC_IMASK_GRSCEN)
+ break;
+ udelay(1);
+ }
+ if (ioread32be(&regs->ievent) &
+ DTSEC_IMASK_GRSCEN)
+ iowrite32be(DTSEC_IMASK_GRSCEN,
+ &regs->ievent);
+ else
+ pr_debug("Rx lockup due to Tx lockup\n");
+
+ /* c.Write a 1 to bit n of FM_RSTC
+ * (offset 0x0CC of FPM)
+ */
+ fman_reset_mac(dtsec->fm, dtsec->mac_id);
+
+ /* d.Wait 4 Tx clocks (32 ns) */
+ udelay(1);
+
+ /* e.Write a 0 to bit n of FM_RSTC. */
+ /* cleared by FMAN
+ */
+ }
+ }
+
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN);
+ }
+ if (event & DTSEC_IMASK_MAGEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT);
+ if (event & DTSEC_IMASK_GRSCEN)
+ dtsec->exception_cb(dtsec->dev_id,
+ FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
+ if (event & DTSEC_IMASK_TDPEEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR);
+ if (event & DTSEC_IMASK_RDPEEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR);
+
+ /* masked interrupts */
+ WARN_ON(event & DTSEC_IMASK_ABRTEN);
+ WARN_ON(event & DTSEC_IMASK_IFERREN);
+}
+
+static void dtsec_1588_isr(void *handle)
+{
+ struct fman_mac *dtsec = (struct fman_mac *)handle;
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 event;
+
+ if (dtsec->ptp_tsu_enabled) {
+ event = ioread32be(&regs->tmr_pevent);
+ event &= ioread32be(&regs->tmr_pemask);
+
+ if (event) {
+ iowrite32be(event, &regs->tmr_pevent);
+ WARN_ON(event & TMR_PEVENT_TSRE);
+ dtsec->exception_cb(dtsec->dev_id,
+ FM_MAC_EX_1G_1588_TS_RX_ERR);
+ }
+ }
+}
+
+static void free_init_resources(struct fman_mac *dtsec)
+{
+ fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
+ FMAN_INTR_TYPE_ERR);
+ fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
+ FMAN_INTR_TYPE_NORMAL);
+
+ /* release the driver's group hash table */
+ free_hash_table(dtsec->multicast_addr_hash);
+ dtsec->multicast_addr_hash = NULL;
+
+ /* release the driver's individual hash table */
+ free_hash_table(dtsec->unicast_addr_hash);
+ dtsec->unicast_addr_hash = NULL;
+}
+
+int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
+{
+ if (is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ dtsec->dtsec_drv_param->maximum_frame = new_val;
+
+ return 0;
+}
+
+int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
+{
+ if (is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ dtsec->dtsec_drv_param->tx_pad_crc = new_val;
+
+ return 0;
+}
+
+int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ /* Enable */
+ tmp = ioread32be(&regs->maccfg1);
+ if (mode & COMM_MODE_RX)
+ tmp |= MACCFG1_RX_EN;
+ if (mode & COMM_MODE_TX)
+ tmp |= MACCFG1_TX_EN;
+
+ iowrite32be(tmp, &regs->maccfg1);
+
+ /* Graceful start - clear the graceful receive stop bit */
+ if (mode & COMM_MODE_TX)
+ iowrite32be(ioread32be(&regs->tctrl) & ~DTSEC_TCTRL_GTS,
+ &regs->tctrl);
+ if (mode & COMM_MODE_RX)
+ iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS,
+ &regs->rctrl);
+
+ return 0;
+}
+
+int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ /* Graceful stop - assert the graceful receive stop bit */
+ if (mode & COMM_MODE_RX) {
+ tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
+ iowrite32be(tmp, &regs->rctrl);
+
+ if (dtsec->fm_rev_info.major == 2)
+ usleep_range(100, 200);
+ else
+ udelay(10);
+ }
+
+ if (mode & COMM_MODE_TX) {
+ if (dtsec->fm_rev_info.major == 2)
+ pr_debug("GTS not supported due to DTSEC_A004 errata.\n");
+ else
+ pr_debug("GTS not supported due to DTSEC_A0014 errata.\n");
+ }
+
+ tmp = ioread32be(&regs->maccfg1);
+ if (mode & COMM_MODE_RX)
+ tmp &= ~MACCFG1_RX_EN;
+ if (mode & COMM_MODE_TX)
+ tmp &= ~MACCFG1_TX_EN;
+
+ iowrite32be(tmp, &regs->maccfg1);
+
+ return 0;
+}
+
+int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
+ u8 __maybe_unused priority,
+ u16 pause_time, u16 __maybe_unused thresh_time)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 ptv = 0;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
+ if (dtsec->fm_rev_info.major == 2)
+ if (pause_time > 0 && pause_time <= 320) {
+ pr_warn("pause-time: %d illegal. Should be > 320\n",
+ pause_time);
+ return -EINVAL;
+ }
+
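+ /* PTV keeps the extended pause time in its upper half (PTE) and
+ * the pause time in its lower half (PT); only PT is updated here.
+ */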
+ if (pause_time) {
+ ptv = ioread32be(&regs->ptv);
+ ptv &= PTV_PTE_MASK;
+ ptv |= pause_time & PTV_PT_MASK;
+ iowrite32be(ptv, &regs->ptv);
+
+ /* trigger the transmission of a flow-control pause frame */
+ iowrite32be(ioread32be(&regs->maccfg1) | MACCFG1_TX_FLOW,
+ &regs->maccfg1);
+ } else
+ iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
+ &regs->maccfg1);
+
+ return 0;
+}
+
+int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->maccfg1);
+ if (en)
+ tmp |= MACCFG1_RX_FLOW;
+ else
+ tmp &= ~MACCFG1_RX_FLOW;
+ iowrite32be(tmp, &regs->maccfg1);
+
+ return 0;
+}
+
+int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
+{
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ /* Initialize MAC Station Address registers (1 & 2).
+ * The station address has to be swapped (big endian to little
+ * endian).
+ */
+ dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
+ set_mac_address(dtsec->regs, (u8 *)(*enet_addr));
+
+ return 0;
+}
+
+int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ struct eth_hash_entry *hash_entry;
+ u64 addr;
+ s32 bucket;
+ u32 crc = 0xFFFFFFFF;
+ bool mcast, ghtx;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
+ ghtx = !!(ioread32be(&regs->rctrl) & RCTRL_GHTX);
+ mcast = !!(addr & MAC_GROUP_ADDRESS);
+
+ /* Cannot handle unicast mac addr when GHTX is on */
+ if (ghtx && !mcast) {
+ pr_err("Could not compute hash bucket\n");
+ return -EINVAL;
+ }
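+ /* crc32_le() returns the CRC with reversed bit order, so bitrev32()
+ * restores the bit ordering the hash-bucket selection below expects.
+ */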
+ crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
+ crc = bitrev32(crc);
+
+ /* Considering the 9 highest order bits in crc H[8:0]:
+ * if ghtx = 0, H[8:6] (the highest order 3 bits) identify the hash
+ * register and H[5:1] (the next 5 bits) identify the hash bit;
+ * if ghtx = 1, H[8:5] (the highest order 4 bits) identify the hash
+ * register and H[4:0] (the next 5 bits) identify the hash bit.
+ *
+ * In the bucket index output, the low 5 bits identify the hash
+ * register bit, while the higher 4 bits identify the hash register.
+ */
+
+ if (ghtx) {
+ bucket = (s32)((crc >> 23) & 0x1ff);
+ } else {
+ bucket = (s32)((crc >> 24) & 0xff);
+ /* if !ghtx and mcast the bit must be set in gaddr instead of
+ *igaddr.
+ */
+ if (mcast)
+ bucket += 0x100;
+ }
+
+ set_bucket(dtsec->regs, bucket, true);
+
+ /* Create element to be added to the driver hash table */
+ hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+ if (!hash_entry)
+ return -ENOMEM;
+ hash_entry->addr = addr;
+ INIT_LIST_HEAD(&hash_entry->node);
+
+ if (addr & MAC_GROUP_ADDRESS)
+ /* Group Address */
+ list_add_tail(&hash_entry->node,
+ &dtsec->multicast_addr_hash->lsts[bucket]);
+ else
+ list_add_tail(&hash_entry->node,
+ &dtsec->unicast_addr_hash->lsts[bucket]);
+
+ return 0;
+}
+
+int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ struct list_head *pos;
+ struct eth_hash_entry *hash_entry = NULL;
+ u64 addr;
+ s32 bucket;
+ u32 crc = 0xFFFFFFFF;
+ bool mcast, ghtx;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
+ ghtx = !!(ioread32be(&regs->rctrl) & RCTRL_GHTX);
+ mcast = !!(addr & MAC_GROUP_ADDRESS);
+
+ /* Cannot handle unicast mac addr when GHTX is on */
+ if (ghtx && !mcast) {
+ pr_err("Could not compute hash bucket\n");
+ return -EINVAL;
+ }
+ crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
+ crc = bitrev32(crc);
+
+ if (ghtx) {
+ bucket = (s32)((crc >> 23) & 0x1ff);
+ } else {
+ bucket = (s32)((crc >> 24) & 0xff);
+ /* if !ghtx and mcast the bit must be set
+ * in gaddr instead of igaddr.
+ */
+ if (mcast)
+ bucket += 0x100;
+ }
+
+ if (addr & MAC_GROUP_ADDRESS) {
+ /* Group Address */
+ list_for_each(pos,
+ &dtsec->multicast_addr_hash->lsts[bucket]) {
+ hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+ if (hash_entry->addr == addr) {
+ list_del_init(&hash_entry->node);
+ kfree(hash_entry);
+ break;
+ }
+ }
+ if (list_empty(&dtsec->multicast_addr_hash->lsts[bucket]))
+ set_bucket(dtsec->regs, bucket, false);
+ } else {
+ /* Individual Address */
+ list_for_each(pos,
+ &dtsec->unicast_addr_hash->lsts[bucket]) {
+ hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+ if (hash_entry->addr == addr) {
+ list_del_init(&hash_entry->node);
+ kfree(hash_entry);
+ break;
+ }
+ }
+ if (list_empty(&dtsec->unicast_addr_hash->lsts[bucket]))
+ set_bucket(dtsec->regs, bucket, false);
+ }
+
+ /* address does not exist */
+ WARN_ON(!hash_entry);
+
+ return 0;
+}
+
+int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ /* Set unicast promiscuous */
+ tmp = ioread32be(&regs->rctrl);
+ if (new_val)
+ tmp |= RCTRL_UPROM;
+ else
+ tmp &= ~RCTRL_UPROM;
+
+ iowrite32be(tmp, &regs->rctrl);
+
+ /* Set multicast promiscuous */
+ tmp = ioread32be(&regs->rctrl);
+ if (new_val)
+ tmp |= RCTRL_MPROM;
+ else
+ tmp &= ~RCTRL_MPROM;
+
+ iowrite32be(tmp, &regs->rctrl);
+
+ return 0;
+}
+
+int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->maccfg2);
+
+ /* Full Duplex */
+ tmp |= MACCFG2_FULL_DUPLEX;
+
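+ /* Nibble mode selects the 4-bit (R)MII datapath used at 10/100
+ * Mb/s; byte mode selects the 8-bit GMII/TBI datapath used at
+ * 1 Gb/s.
+ */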
+ tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
+ if (speed < SPEED_1000)
+ tmp |= MACCFG2_NIBBLE_MODE;
+ else if (speed == SPEED_1000)
+ tmp |= MACCFG2_BYTE_MODE;
+ iowrite32be(tmp, &regs->maccfg2);
+
+ tmp = ioread32be(&regs->ecntrl);
+ if (speed == SPEED_100)
+ tmp |= DTSEC_ECNTRL_R100M;
+ else
+ tmp &= ~DTSEC_ECNTRL_R100M;
+ iowrite32be(tmp, &regs->ecntrl);
+
+ return 0;
+}
+
+int dtsec_restart_autoneg(struct fman_mac *dtsec)
+{
+ u16 tmp_reg16;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);
+
+ tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
+ tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
+ BMCR_FULLDPLX | BMCR_SPEED1000);
+
+ phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
+
+ return 0;
+}
+
+int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ *mac_version = ioread32be(&regs->tsec_id);
+
+ return 0;
+}
+
+int dtsec_set_exception(struct fman_mac *dtsec,
+ enum fman_mac_exceptions exception, bool enable)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 bit_mask = 0;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
+ bit_mask = get_exception_flag(exception);
+ if (bit_mask) {
+ if (enable)
+ dtsec->exceptions |= bit_mask;
+ else
+ dtsec->exceptions &= ~bit_mask;
+ } else {
+ pr_err("Undefined exception\n");
+ return -EINVAL;
+ }
+ if (enable)
+ iowrite32be(ioread32be(&regs->imask) | bit_mask,
+ &regs->imask);
+ else
+ iowrite32be(ioread32be(&regs->imask) & ~bit_mask,
+ &regs->imask);
+ } else {
+ if (!dtsec->ptp_tsu_enabled) {
+ pr_err("Exception valid for 1588 only\n");
+ return -EINVAL;
+ }
+ switch (exception) {
+ case FM_MAC_EX_1G_1588_TS_RX_ERR:
+ if (enable) {
+ dtsec->en_tsu_err_exception = true;
+ iowrite32be(ioread32be(&regs->tmr_pemask) |
+ TMR_PEMASK_TSREEN,
+ &regs->tmr_pemask);
+ } else {
+ dtsec->en_tsu_err_exception = false;
+ iowrite32be(ioread32be(&regs->tmr_pemask) &
+ ~TMR_PEMASK_TSREEN,
+ &regs->tmr_pemask);
+ }
+ break;
+ default:
+ pr_err("Undefined exception\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int dtsec_init(struct fman_mac *dtsec)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ struct dtsec_cfg *dtsec_drv_param;
+ int err;
+ u16 max_frm_ln;
+ enet_addr_t eth_addr;
+
+ if (is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ if (DEFAULT_RESET_ON_INIT &&
+ (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
+ pr_err("Can't reset MAC!\n");
+ return -EINVAL;
+ }
+
+ err = check_init_parameters(dtsec);
+ if (err)
+ return err;
+
+ dtsec_drv_param = dtsec->dtsec_drv_param;
+
+ MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr);
+
+ err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
+ dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions,
+ dtsec->tbiphy->addr);
+ if (err) {
+ free_init_resources(dtsec);
+ pr_err("DTSEC version doesn't support this i/f mode\n");
+ return err;
+ }
+
+ if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ u16 tmp_reg16;
+
+ /* Configure the TBI PHY Control Register */
+ tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
+ phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
+
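+		/* Pulse the TBI soft reset: write TBICON with the reset
+		 * bit asserted, then again with only the clock selection
+		 * kept.
+		 */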
+ tmp_reg16 = TBICON_CLK_SELECT;
+ phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
+
+ tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
+ BMCR_FULLDPLX | BMCR_SPEED1000);
+ phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
+
+ if (dtsec->basex_if)
+ tmp_reg16 = TBIANA_1000X;
+ else
+ tmp_reg16 = TBIANA_SGMII;
+ phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);
+
+ tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
+ BMCR_FULLDPLX | BMCR_SPEED1000);
+
+ phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
+ }
+
+ /* Max Frame Length */
+ max_frm_ln = (u16)ioread32be(&regs->maxfrm);
+ err = fman_set_mac_max_frame(dtsec->fm, dtsec->mac_id, max_frm_ln);
+ if (err) {
+ pr_err("Setting max frame length failed\n");
+ free_init_resources(dtsec);
+ return -EINVAL;
+ }
+
+ dtsec->multicast_addr_hash =
+ alloc_hash_table(EXTENDED_HASH_TABLE_SIZE);
+ if (!dtsec->multicast_addr_hash) {
+ free_init_resources(dtsec);
+ pr_err("MC hash table is failed\n");
+ return -ENOMEM;
+ }
+
+ dtsec->unicast_addr_hash = alloc_hash_table(DTSEC_HASH_TABLE_SIZE);
+ if (!dtsec->unicast_addr_hash) {
+ free_init_resources(dtsec);
+ pr_err("UC hash table is failed\n");
+ return -ENOMEM;
+ }
+
+ /* register err intr handler for dtsec to FPM (err) */
+ fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
+ FMAN_INTR_TYPE_ERR, dtsec_isr, dtsec);
+ /* register 1588 intr handler for TMR to FPM (normal) */
+ fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
+ FMAN_INTR_TYPE_NORMAL, dtsec_1588_isr, dtsec);
+
+ kfree(dtsec_drv_param);
+ dtsec->dtsec_drv_param = NULL;
+
+ return 0;
+}
+
+int dtsec_free(struct fman_mac *dtsec)
+{
+ free_init_resources(dtsec);
+
+ kfree(dtsec->dtsec_drv_param);
+ dtsec->dtsec_drv_param = NULL;
+ kfree(dtsec);
+
+ return 0;
+}
+
+struct fman_mac *dtsec_config(struct fman_mac_params *params)
+{
+ struct fman_mac *dtsec;
+ struct dtsec_cfg *dtsec_drv_param;
+ void __iomem *base_addr;
+
+ base_addr = params->base_addr;
+
+ /* allocate memory for the dTSEC data structure */
+ dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
+ if (!dtsec)
+ return NULL;
+
+ /* allocate memory for the dTSEC driver parameters data structure */
+ dtsec_drv_param = kzalloc(sizeof(*dtsec_drv_param), GFP_KERNEL);
+ if (!dtsec_drv_param)
+ goto err_dtsec;
+
+ /* Plant parameter structure pointer */
+ dtsec->dtsec_drv_param = dtsec_drv_param;
+
+ set_dflts(dtsec_drv_param);
+
+ dtsec->regs = base_addr;
+ dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
+ dtsec->max_speed = params->max_speed;
+ dtsec->phy_if = params->phy_if;
+ dtsec->mac_id = params->mac_id;
+ dtsec->exceptions = DTSEC_EVENTS_MASK;
+ dtsec->exception_cb = params->exception_cb;
+ dtsec->event_cb = params->event_cb;
+ dtsec->dev_id = params->dev_id;
+ dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
+ dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;
+
+ dtsec->fm = params->fm;
+ dtsec->basex_if = params->basex_if;
+
+ if (!params->internal_phy_node) {
+ pr_err("TBI PHY node is not available\n");
+ goto err_dtsec_drv_param;
+ }
+
+ dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
+ if (!dtsec->tbiphy) {
+ pr_err("of_phy_find_device (TBI PHY) failed\n");
+ goto err_dtsec_drv_param;
+ }
+
+ put_device(&dtsec->tbiphy->dev);
+
+ /* Save FMan revision */
+ fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
+
+ return dtsec;
+
+err_dtsec_drv_param:
+ kfree(dtsec_drv_param);
+err_dtsec:
+ kfree(dtsec);
+ return NULL;
+}
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
new file mode 100644
index 0000000..c4467c0
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DTSEC_H
+#define __DTSEC_H
+
+#include "fman_mac.h"
+
+struct fman_mac *dtsec_config(struct fman_mac_params *params);
+int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val);
+int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr);
+int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed);
+int dtsec_restart_autoneg(struct fman_mac *dtsec);
+int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val);
+int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val);
+int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode);
+int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode);
+int dtsec_init(struct fman_mac *dtsec);
+int dtsec_free(struct fman_mac *dtsec);
+int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en);
+int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, u8 priority,
+ u16 pause_time, u16 thresh_time);
+int dtsec_set_exception(struct fman_mac *dtsec,
+ enum fman_mac_exceptions exception, bool enable);
+int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
+int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
+int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
+
+#endif /* __DTSEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
new file mode 100644
index 0000000..8ddeedb
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -0,0 +1,278 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* FM MAC - common declarations shared by the FMan MAC drivers */
+#ifndef __FM_MAC_H
+#define __FM_MAC_H
+
+#include "fman.h"
+
+#include <linux/slab.h>
+#include <linux/phy.h>
+#include <linux/if_ether.h>
+
+struct fman_mac;
+
+/* Ethernet Address */
+typedef u8 enet_addr_t[ETH_ALEN];
+
+#define ENET_ADDR_TO_UINT64(_enet_addr) \
+ (u64)(((u64)(_enet_addr)[0] << 40) | \
+ ((u64)(_enet_addr)[1] << 32) | \
+ ((u64)(_enet_addr)[2] << 24) | \
+ ((u64)(_enet_addr)[3] << 16) | \
+ ((u64)(_enet_addr)[4] << 8) | \
+ ((u64)(_enet_addr)[5]))
+
+#define MAKE_ENET_ADDR_FROM_UINT64(_addr64, _enet_addr) \
+ do { \
+ int i; \
+ for (i = 0; i < ETH_ALEN; i++) \
+ (_enet_addr)[i] = \
+ (u8)((_addr64) >> ((5 - i) * 8)); \
+ } while (0)
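+
+/* For example, the address 00:04:9f:01:02:03 packs to 0x0000049f010203ULL,
+ * with octet 0 (the first on the wire) in the most significant used byte.
+ */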
+
+/* defaults */
+#define DEFAULT_RESET_ON_INIT false
+
+/* PFC defines */
+#define FSL_FM_PAUSE_TIME_ENABLE 0xf000
+#define FSL_FM_PAUSE_TIME_DISABLE 0
+#define FSL_FM_PAUSE_THRESH_DEFAULT 0
+
+#define FM_MAC_NO_PFC 0xff
+
+/* HASH defines */
+#define ETH_HASH_ENTRY_OBJ(ptr) \
+ hlist_entry_safe(ptr, struct eth_hash_entry, node)
+
+/* Enumeration (bit flags) of communication modes (Transmit,
+ * receive or both).
+ */
+enum comm_mode {
+ COMM_MODE_NONE = 0, /* No transmit/receive communication */
+ COMM_MODE_RX = 1, /* Only receive communication */
+ COMM_MODE_TX = 2, /* Only transmit communication */
+ COMM_MODE_RX_AND_TX = 3 /* Both transmit and receive communication */
+};
+
+/* FM MAC Exceptions */
+enum fman_mac_exceptions {
+ FM_MAC_EX_10G_MDIO_SCAN_EVENT = 0
+ /* 10GEC MDIO scan event interrupt */
+ , FM_MAC_EX_10G_MDIO_CMD_CMPL
+ /* 10GEC MDIO command completion interrupt */
+ , FM_MAC_EX_10G_REM_FAULT
+ /* 10GEC, mEMAC Remote fault interrupt */
+ , FM_MAC_EX_10G_LOC_FAULT
+ /* 10GEC, mEMAC Local fault interrupt */
+ , FM_MAC_EX_10G_TX_ECC_ER
+ /* 10GEC, mEMAC Transmit frame ECC error interrupt */
+ , FM_MAC_EX_10G_TX_FIFO_UNFL
+ /* 10GEC, mEMAC Transmit FIFO underflow interrupt */
+ , FM_MAC_EX_10G_TX_FIFO_OVFL
+ /* 10GEC, mEMAC Transmit FIFO overflow interrupt */
+ , FM_MAC_EX_10G_TX_ER
+ /* 10GEC Transmit frame error interrupt */
+ , FM_MAC_EX_10G_RX_FIFO_OVFL
+ /* 10GEC, mEMAC Receive FIFO overflow interrupt */
+ , FM_MAC_EX_10G_RX_ECC_ER
+ /* 10GEC, mEMAC Receive frame ECC error interrupt */
+ , FM_MAC_EX_10G_RX_JAB_FRM
+ /* 10GEC Receive jabber frame interrupt */
+ , FM_MAC_EX_10G_RX_OVRSZ_FRM
+ /* 10GEC Receive oversized frame interrupt */
+ , FM_MAC_EX_10G_RX_RUNT_FRM
+ /* 10GEC Receive runt frame interrupt */
+ , FM_MAC_EX_10G_RX_FRAG_FRM
+ /* 10GEC Receive fragment frame interrupt */
+ , FM_MAC_EX_10G_RX_LEN_ER
+ /* 10GEC Receive payload length error interrupt */
+ , FM_MAC_EX_10G_RX_CRC_ER
+ /* 10GEC Receive CRC error interrupt */
+ , FM_MAC_EX_10G_RX_ALIGN_ER
+ /* 10GEC Receive alignment error interrupt */
+ , FM_MAC_EX_1G_BAB_RX
+ /* dTSEC Babbling receive error */
+ , FM_MAC_EX_1G_RX_CTL
+ /* dTSEC Receive control (pause frame) interrupt */
+ , FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET
+ /* dTSEC Graceful transmit stop complete */
+ , FM_MAC_EX_1G_BAB_TX
+ /* dTSEC Babbling transmit error */
+ , FM_MAC_EX_1G_TX_CTL
+ /* dTSEC Transmit control (pause frame) interrupt */
+ , FM_MAC_EX_1G_TX_ERR
+ /* dTSEC Transmit error */
+ , FM_MAC_EX_1G_LATE_COL
+ /* dTSEC Late collision */
+ , FM_MAC_EX_1G_COL_RET_LMT
+ /* dTSEC Collision retry limit */
+ , FM_MAC_EX_1G_TX_FIFO_UNDRN
+ /* dTSEC Transmit FIFO underrun */
+ , FM_MAC_EX_1G_MAG_PCKT
+ /* dTSEC Magic Packet detection */
+ , FM_MAC_EX_1G_MII_MNG_RD_COMPLET
+ /* dTSEC MII management read completion */
+ , FM_MAC_EX_1G_MII_MNG_WR_COMPLET
+ /* dTSEC MII management write completion */
+ , FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET
+ /* dTSEC Graceful receive stop complete */
+ , FM_MAC_EX_1G_DATA_ERR
+ /* dTSEC Internal data error on transmit */
+ , FM_MAC_1G_RX_DATA_ERR
+ /* dTSEC Internal data error on receive */
+ , FM_MAC_EX_1G_1588_TS_RX_ERR
+ /* dTSEC Time-Stamp Receive Error */
+ , FM_MAC_EX_1G_RX_MIB_CNT_OVFL
+ /* dTSEC MIB counter overflow */
+ , FM_MAC_EX_TS_FIFO_ECC_ERR
+ /* mEMAC Time-stamp FIFO ECC error interrupt;
+ * not supported on T4240/B4860 rev1 chips
+ */
+ , FM_MAC_EX_MAGIC_PACKET_INDICATION = FM_MAC_EX_1G_MAG_PCKT
+ /* mEMAC Magic Packet Indication Interrupt */
+};
+
+struct eth_hash_entry {
+ u64 addr; /* Ethernet Address */
+ struct list_head node;
+};
+
+typedef void (fman_mac_exception_cb)(void *dev_id,
+ enum fman_mac_exceptions exceptions);
+
+/* FMan MAC config input */
+struct fman_mac_params {
+ /* Base of memory mapped FM MAC registers */
+ void __iomem *base_addr;
+ /* MAC address of device; First octet is sent first */
+ enet_addr_t addr;
+ /* MAC ID; numbering of dTSEC and 1G-mEMAC:
+ * 0 - FM_MAX_NUM_OF_1G_MACS;
+ * numbering of 10G-MAC (TGEC) and 10G-mEMAC:
+ * 0 - FM_MAX_NUM_OF_10G_MACS
+ */
+ u8 mac_id;
+ /* PHY interface */
+ phy_interface_t phy_if;
+ /* Note that the speed should indicate the maximum rate that
+	 * this MAC should support rather than the actual speed.
+ */
+ u16 max_speed;
+ /* A handle to the FM object this port related to */
+ void *fm;
+ /* MDIO exceptions interrupt source - not valid for all
+ * MACs; MUST be set to 'NO_IRQ' for MACs that don't have
+ * mdio-irq, or for polling
+ */
+ void *dev_id; /* device cookie used by the exception cbs */
+ fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */
+ fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
+	/* SGMII/QSGMII interface with 1000BaseX auto-negotiation between MAC
+ * and phy or backplane; Note: 1000BaseX auto-negotiation relates only
+ * to interface between MAC and phy/backplane, SGMII phy can still
+ * synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps
+ */
+ bool basex_if;
+ /* Pointer to TBI/PCS PHY node, used for TBI/PCS PHY access */
+ struct device_node *internal_phy_node;
+};
+
+struct eth_hash_t {
+ u16 size;
+ struct list_head *lsts;
+};
+
+static inline struct eth_hash_entry
+*dequeue_addr_from_hash_entry(struct list_head *addr_lst)
+{
+ struct eth_hash_entry *hash_entry = NULL;
+
+ if (!list_empty(addr_lst)) {
+ hash_entry = ETH_HASH_ENTRY_OBJ(addr_lst->next);
+ list_del_init(&hash_entry->node);
+ }
+ return hash_entry;
+}
+
+static inline void free_hash_table(struct eth_hash_t *hash)
+{
+ struct eth_hash_entry *hash_entry;
+ int i = 0;
+
+ if (hash) {
+ if (hash->lsts) {
+ for (i = 0; i < hash->size; i++) {
+ hash_entry =
+ dequeue_addr_from_hash_entry(&hash->lsts[i]);
+ while (hash_entry) {
+ kfree(hash_entry);
+ hash_entry =
+					dequeue_addr_from_hash_entry(&hash->lsts[i]);
+ }
+ }
+
+ kfree(hash->lsts);
+ }
+
+ kfree(hash);
+ }
+}
+
+static inline struct eth_hash_t *alloc_hash_table(u16 size)
+{
+ u32 i;
+ struct eth_hash_t *hash;
+
+ /* Allocate address hash table */
+	hash = kmalloc(sizeof(*hash), GFP_KERNEL);
+ if (!hash)
+ return NULL;
+
+ hash->size = size;
+
+ hash->lsts = kmalloc_array(hash->size, sizeof(struct list_head),
+ GFP_KERNEL);
+ if (!hash->lsts) {
+ kfree(hash);
+ return NULL;
+ }
+
+ for (i = 0; i < hash->size; i++)
+ INIT_LIST_HEAD(&hash->lsts[i]);
+
+ return hash;
+}
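+
+/* Minimal usage sketch (illustrative only): a MAC driver allocates the
+ * table once, queues entries onto the per-code bucket lists, and tears
+ * everything down with free_hash_table():
+ *
+ *	struct eth_hash_t *hash = alloc_hash_table(64);
+ *
+ *	if (hash)
+ *		list_add_tail(&entry->node, &hash->lsts[code]);
+ *	...
+ *	free_hash_table(hash);
+ */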
+
+#endif /* __FM_MAC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
new file mode 100644
index 0000000..58bb720
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -0,0 +1,1170 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "fman_memac.h"
+#include "fman.h"
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/phy.h>
+#include <linux/of_mdio.h>
+
+/* PCS registers */
+#define MDIO_SGMII_CR 0x00
+#define MDIO_SGMII_DEV_ABIL_SGMII 0x04
+#define MDIO_SGMII_LINK_TMR_L 0x12
+#define MDIO_SGMII_LINK_TMR_H 0x13
+#define MDIO_SGMII_IF_MODE 0x14
+
+/* SGMII Control defines */
+#define SGMII_CR_AN_EN 0x1000
+#define SGMII_CR_RESTART_AN 0x0200
+#define SGMII_CR_FD 0x0100
+#define SGMII_CR_SPEED_SEL1_1G 0x0040
+#define SGMII_CR_DEF_VAL (SGMII_CR_AN_EN | SGMII_CR_FD | \
+ SGMII_CR_SPEED_SEL1_1G)
+
+/* SGMII Device Ability for SGMII defines */
+#define MDIO_SGMII_DEV_ABIL_SGMII_MODE 0x4001
+#define MDIO_SGMII_DEV_ABIL_BASEX_MODE 0x01A0
+
+/* Link timer define */
+#define LINK_TMR_L 0xa120
+#define LINK_TMR_H 0x0007
+#define LINK_TMR_L_BASEX 0xaf08
+#define LINK_TMR_H_BASEX 0x002f
+
+/* SGMII IF Mode defines */
+#define IF_MODE_USE_SGMII_AN 0x0002
+#define IF_MODE_SGMII_EN 0x0001
+#define IF_MODE_SGMII_SPEED_100M 0x0004
+#define IF_MODE_SGMII_SPEED_1G 0x0008
+#define IF_MODE_SGMII_DUPLEX_HALF 0x0010
+
+/* Num of additional exact match MAC adr regs */
+#define MEMAC_NUM_OF_PADDRS 7
+
+/* Control and Configuration Register (COMMAND_CONFIG) */
+#define CMD_CFG_REG_LOWP_RXETY 0x01000000 /* 07 Rx low power indication */
+#define CMD_CFG_TX_LOWP_ENA 0x00800000 /* 08 Tx Low Power Idle Enable */
+#define CMD_CFG_PFC_MODE 0x00080000 /* 12 Enable PFC */
+#define CMD_CFG_NO_LEN_CHK 0x00020000 /* 14 Payload length check disable */
+#define CMD_CFG_SW_RESET 0x00001000 /* 19 S/W Reset, self clearing bit */
+#define CMD_CFG_TX_PAD_EN 0x00000800 /* 20 Enable Tx padding of frames */
+#define CMD_CFG_PAUSE_IGNORE 0x00000100 /* 23 Ignore Pause frame quanta */
+#define CMD_CFG_CRC_FWD 0x00000040 /* 25 Terminate/frwd CRC of frames */
+#define CMD_CFG_PAD_EN 0x00000020 /* 26 Frame padding removal */
+#define CMD_CFG_PROMIS_EN 0x00000010 /* 27 Promiscuous operation enable */
+#define CMD_CFG_RX_EN 0x00000002 /* 30 MAC receive path enable */
+#define CMD_CFG_TX_EN 0x00000001 /* 31 MAC transmit path enable */
+
+/* Transmit FIFO Sections Register (TX_FIFO_SECTIONS) */
+#define TX_FIFO_SECTIONS_TX_EMPTY_MASK 0xFFFF0000
+#define TX_FIFO_SECTIONS_TX_AVAIL_MASK 0x0000FFFF
+#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G 0x00400000
+#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G 0x00100000
+#define TX_FIFO_SECTIONS_TX_AVAIL_10G 0x00000019
+#define TX_FIFO_SECTIONS_TX_AVAIL_1G 0x00000020
+#define TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G 0x00000060
+
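+/* Clear the TX_EMPTY field of _val and fill in its default: the 10G default
+ * when the remaining TX_AVAIL field equals the 10G value, the 1G default
+ * otherwise.
+ */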
+#define GET_TX_EMPTY_DEFAULT_VALUE(_val) \
+do { \
+ _val &= ~TX_FIFO_SECTIONS_TX_EMPTY_MASK; \
+ ((_val == TX_FIFO_SECTIONS_TX_AVAIL_10G) ? \
+ (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G) :\
+ (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G));\
+} while (0)
+
+/* Interface Mode Register (IF_MODE) */
+
+#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */
+#define IF_MODE_XGMII 0x00000000 /* 30-31 XGMII (10G) interface */
+#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */
+#define IF_MODE_RGMII 0x00000004
+#define IF_MODE_RGMII_AUTO 0x00008000
+#define IF_MODE_RGMII_1000 0x00004000 /* 10 - 1000Mbps RGMII */
+#define IF_MODE_RGMII_100 0x00000000 /* 00 - 100Mbps RGMII */
+#define IF_MODE_RGMII_10 0x00002000 /* 01 - 10Mbps RGMII */
+#define IF_MODE_RGMII_SP_MASK 0x00006000 /* Setsp mask bits */
+#define IF_MODE_RGMII_FD 0x00001000 /* Full duplex RGMII */
+#define IF_MODE_HD 0x00000040 /* Half duplex operation */
+
+/* Hash table Control Register (HASHTABLE_CTRL) */
+#define HASH_CTRL_MCAST_EN 0x00000100
+/* 26-31 Hash table address code */
+#define HASH_CTRL_ADDR_MASK 0x0000003F
+/* MAC mcast indication */
+#define GROUP_ADDRESS 0x0000010000000000LL
+#define HASH_TABLE_SIZE 64 /* Hash tbl size */
+
+/* Interrupt Mask Register (IMASK) */
+#define MEMAC_IMASK_MGI 0x40000000 /* 1 Magic pkt detect indication */
+#define MEMAC_IMASK_TSECC_ER 0x20000000 /* 2 Timestamp FIFO ECC error evnt */
+#define MEMAC_IMASK_TECC_ER 0x02000000 /* 6 Transmit frame ECC error evnt */
+#define MEMAC_IMASK_RECC_ER 0x01000000 /* 7 Receive frame ECC error evnt */
+
+#define MEMAC_ALL_ERRS_IMASK \
+ ((u32)(MEMAC_IMASK_TSECC_ER | \
+ MEMAC_IMASK_TECC_ER | \
+ MEMAC_IMASK_RECC_ER | \
+ MEMAC_IMASK_MGI))
+
+#define MEMAC_IEVNT_PCS 0x80000000 /* PCS (XG). Link sync (G) */
+#define MEMAC_IEVNT_AN 0x40000000 /* Auto-negotiation */
+#define MEMAC_IEVNT_LT 0x20000000 /* Link Training/New page */
+#define MEMAC_IEVNT_MGI 0x00004000 /* Magic pkt detection */
+#define MEMAC_IEVNT_TS_ECC_ER 0x00002000 /* Timestamp FIFO ECC error*/
+#define MEMAC_IEVNT_RX_FIFO_OVFL 0x00001000 /* Rx FIFO overflow */
+#define MEMAC_IEVNT_TX_FIFO_UNFL 0x00000800 /* Tx FIFO underflow */
+#define MEMAC_IEVNT_TX_FIFO_OVFL 0x00000400 /* Tx FIFO overflow */
+#define MEMAC_IEVNT_TX_ECC_ER 0x00000200 /* Tx frame ECC error */
+#define MEMAC_IEVNT_RX_ECC_ER 0x00000100 /* Rx frame ECC error */
+#define MEMAC_IEVNT_LI_FAULT 0x00000080 /* Link Interruption flt */
+#define MEMAC_IEVNT_RX_EMPTY 0x00000040 /* Rx FIFO empty */
+#define MEMAC_IEVNT_TX_EMPTY 0x00000020 /* Tx FIFO empty */
+#define MEMAC_IEVNT_RX_LOWP 0x00000010 /* Low Power Idle */
+#define MEMAC_IEVNT_PHY_LOS 0x00000004 /* Phy loss of signal */
+#define MEMAC_IEVNT_REM_FAULT 0x00000002 /* Remote fault (XGMII) */
+#define MEMAC_IEVNT_LOC_FAULT 0x00000001 /* Local fault (XGMII) */
+
+#define DEFAULT_PAUSE_QUANTA 0xf000
+#define DEFAULT_FRAME_LENGTH 0x600
+#define DEFAULT_TX_IPG_LENGTH 12
+
+#define CLXY_PAUSE_QUANTA_CLX_PQNT 0x0000FFFF
+#define CLXY_PAUSE_QUANTA_CLY_PQNT 0xFFFF0000
+#define CLXY_PAUSE_THRESH_CLX_QTH 0x0000FFFF
+#define CLXY_PAUSE_THRESH_CLY_QTH 0xFFFF0000
+
+struct mac_addr {
+ /* Lower 32 bits of 48-bit MAC address */
+ u32 mac_addr_l;
+ /* Upper 16 bits of 48-bit MAC address */
+ u32 mac_addr_u;
+};
+
+/* memory map */
+struct memac_regs {
+ u32 res0000[2]; /* General Control and Status */
+ u32 command_config; /* 0x008 Ctrl and cfg */
+ struct mac_addr mac_addr0; /* 0x00C-0x010 MAC_ADDR_0...1 */
+ u32 maxfrm; /* 0x014 Max frame length */
+ u32 res0018[1];
+ u32 rx_fifo_sections; /* Receive FIFO configuration reg */
+ u32 tx_fifo_sections; /* Transmit FIFO configuration reg */
+ u32 res0024[2];
+ u32 hashtable_ctrl; /* 0x02C Hash table control */
+ u32 res0030[4];
+ u32 ievent; /* 0x040 Interrupt event */
+ u32 tx_ipg_length; /* 0x044 Transmitter inter-packet-gap */
+ u32 res0048;
+ u32 imask; /* 0x04C Interrupt mask */
+ u32 res0050;
+ u32 pause_quanta[4]; /* 0x054 Pause quanta */
+ u32 pause_thresh[4]; /* 0x064 Pause quanta threshold */
+ u32 rx_pause_status; /* 0x074 Receive pause status */
+ u32 res0078[2];
+ struct mac_addr mac_addr[MEMAC_NUM_OF_PADDRS];/* 0x80-0x0B4 mac padr */
+ u32 lpwake_timer; /* 0x0B8 Low Power Wakeup Timer */
+ u32 sleep_timer; /* 0x0BC Transmit EEE Low Power Timer */
+ u32 res00c0[8];
+ u32 statn_config; /* 0x0E0 Statistics configuration */
+ u32 res00e4[7];
+ /* Rx Statistics Counter */
+ u32 reoct_l;
+ u32 reoct_u;
+ u32 roct_l;
+ u32 roct_u;
+ u32 raln_l;
+ u32 raln_u;
+ u32 rxpf_l;
+ u32 rxpf_u;
+ u32 rfrm_l;
+ u32 rfrm_u;
+ u32 rfcs_l;
+ u32 rfcs_u;
+ u32 rvlan_l;
+ u32 rvlan_u;
+ u32 rerr_l;
+ u32 rerr_u;
+ u32 ruca_l;
+ u32 ruca_u;
+ u32 rmca_l;
+ u32 rmca_u;
+ u32 rbca_l;
+ u32 rbca_u;
+ u32 rdrp_l;
+ u32 rdrp_u;
+ u32 rpkt_l;
+ u32 rpkt_u;
+ u32 rund_l;
+ u32 rund_u;
+ u32 r64_l;
+ u32 r64_u;
+ u32 r127_l;
+ u32 r127_u;
+ u32 r255_l;
+ u32 r255_u;
+ u32 r511_l;
+ u32 r511_u;
+ u32 r1023_l;
+ u32 r1023_u;
+ u32 r1518_l;
+ u32 r1518_u;
+ u32 r1519x_l;
+ u32 r1519x_u;
+ u32 rovr_l;
+ u32 rovr_u;
+ u32 rjbr_l;
+ u32 rjbr_u;
+ u32 rfrg_l;
+ u32 rfrg_u;
+ u32 rcnp_l;
+ u32 rcnp_u;
+ u32 rdrntp_l;
+ u32 rdrntp_u;
+ u32 res01d0[12];
+ /* Tx Statistics Counter */
+ u32 teoct_l;
+ u32 teoct_u;
+ u32 toct_l;
+ u32 toct_u;
+ u32 res0210[2];
+ u32 txpf_l;
+ u32 txpf_u;
+ u32 tfrm_l;
+ u32 tfrm_u;
+ u32 tfcs_l;
+ u32 tfcs_u;
+ u32 tvlan_l;
+ u32 tvlan_u;
+ u32 terr_l;
+ u32 terr_u;
+ u32 tuca_l;
+ u32 tuca_u;
+ u32 tmca_l;
+ u32 tmca_u;
+ u32 tbca_l;
+ u32 tbca_u;
+ u32 res0258[2];
+ u32 tpkt_l;
+ u32 tpkt_u;
+ u32 tund_l;
+ u32 tund_u;
+ u32 t64_l;
+ u32 t64_u;
+ u32 t127_l;
+ u32 t127_u;
+ u32 t255_l;
+ u32 t255_u;
+ u32 t511_l;
+ u32 t511_u;
+ u32 t1023_l;
+ u32 t1023_u;
+ u32 t1518_l;
+ u32 t1518_u;
+ u32 t1519x_l;
+ u32 t1519x_u;
+ u32 res02a8[6];
+ u32 tcnp_l;
+ u32 tcnp_u;
+ u32 res02c8[14];
+ /* Line Interface Control */
+ u32 if_mode; /* 0x300 Interface Mode Control */
+ u32 if_status; /* 0x304 Interface Status */
+ u32 res0308[14];
+ /* HiGig/2 */
+ u32 hg_config; /* 0x340 Control and cfg */
+ u32 res0344[3];
+ u32 hg_pause_quanta; /* 0x350 Pause quanta */
+ u32 res0354[3];
+ u32 hg_pause_thresh; /* 0x360 Pause quanta threshold */
+ u32 res0364[3];
+ u32 hgrx_pause_status; /* 0x370 Receive pause status */
+ u32 hg_fifos_status; /* 0x374 fifos status */
+ u32 rhm; /* 0x378 rx messages counter */
+ u32 thm; /* 0x37C tx messages counter */
+};
+
+struct memac_cfg {
+ bool reset_on_init;
+ bool pause_ignore;
+ bool promiscuous_mode_enable;
+ struct fixed_phy_status *fixed_link;
+ u16 max_frame_length;
+ u16 pause_quanta;
+ u32 tx_ipg_length;
+};
+
+struct fman_mac {
+ /* Pointer to MAC memory mapped registers */
+ struct memac_regs __iomem *regs;
+ /* MAC address of device */
+ u64 addr;
+ /* Ethernet physical interface */
+ phy_interface_t phy_if;
+ u16 max_speed;
+ void *dev_id; /* device cookie used by the exception cbs */
+ fman_mac_exception_cb *exception_cb;
+ fman_mac_exception_cb *event_cb;
+ /* Pointer to driver's global address hash table */
+ struct eth_hash_t *multicast_addr_hash;
+ /* Pointer to driver's individual address hash table */
+ struct eth_hash_t *unicast_addr_hash;
+ u8 mac_id;
+ u32 exceptions;
+ struct memac_cfg *memac_drv_param;
+ void *fm;
+ struct fman_rev_info fm_rev_info;
+ bool basex_if;
+ struct phy_device *pcsphy;
+};
+
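+/* Program one of the exact-match address registers: the first four octets
+ * go to the low word and the last two to the high word, first octet in the
+ * least significant byte. paddr_num 0 selects MAC_ADDR_0, while 1 to
+ * MEMAC_NUM_OF_PADDRS select the additional paddr registers.
+ */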
+static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr,
+ u8 paddr_num)
+{
+ u32 tmp0, tmp1;
+
+ tmp0 = (u32)(adr[0] | adr[1] << 8 | adr[2] << 16 | adr[3] << 24);
+ tmp1 = (u32)(adr[4] | adr[5] << 8);
+
+ if (paddr_num == 0) {
+ iowrite32be(tmp0, &regs->mac_addr0.mac_addr_l);
+ iowrite32be(tmp1, &regs->mac_addr0.mac_addr_u);
+ } else {
+ iowrite32be(tmp0, &regs->mac_addr[paddr_num - 1].mac_addr_l);
+ iowrite32be(tmp1, &regs->mac_addr[paddr_num - 1].mac_addr_u);
+ }
+}
+
+static int reset(struct memac_regs __iomem *regs)
+{
+ u32 tmp;
+ int count;
+
+ tmp = ioread32be(&regs->command_config);
+
+ tmp |= CMD_CFG_SW_RESET;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ count = 100;
+ do {
+ udelay(1);
+ } while ((ioread32be(&regs->command_config) & CMD_CFG_SW_RESET) &&
+ --count);
+
+ if (count == 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static void set_exception(struct memac_regs __iomem *regs, u32 val,
+ bool enable)
+{
+ u32 tmp;
+
+ tmp = ioread32be(&regs->imask);
+ if (enable)
+ tmp |= val;
+ else
+ tmp &= ~val;
+
+ iowrite32be(tmp, &regs->imask);
+}
+
+static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
+ phy_interface_t phy_if, u16 speed, bool slow_10g_if,
+ u32 exceptions)
+{
+ u32 tmp;
+
+ /* Config */
+ tmp = 0;
+ if (cfg->promiscuous_mode_enable)
+ tmp |= CMD_CFG_PROMIS_EN;
+ if (cfg->pause_ignore)
+ tmp |= CMD_CFG_PAUSE_IGNORE;
+
+ /* Payload length check disable */
+ tmp |= CMD_CFG_NO_LEN_CHK;
+ /* Enable padding of frames in transmit direction */
+ tmp |= CMD_CFG_TX_PAD_EN;
+
+ tmp |= CMD_CFG_CRC_FWD;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ /* Max Frame Length */
+ iowrite32be((u32)cfg->max_frame_length, &regs->maxfrm);
+
+ /* Pause Time */
+ iowrite32be((u32)cfg->pause_quanta, &regs->pause_quanta[0]);
+ iowrite32be((u32)0, &regs->pause_thresh[0]);
+
+ /* IF_MODE */
+ tmp = 0;
+ switch (phy_if) {
+ case PHY_INTERFACE_MODE_XGMII:
+ tmp |= IF_MODE_XGMII;
+ break;
+ default:
+ tmp |= IF_MODE_GMII;
+ if (phy_if == PHY_INTERFACE_MODE_RGMII)
+ tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
+ }
+ iowrite32be(tmp, &regs->if_mode);
+
+ /* TX_FIFO_SECTIONS */
+ tmp = 0;
+ if (phy_if == PHY_INTERFACE_MODE_XGMII) {
+ if (slow_10g_if) {
+ tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G |
+ TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
+ } else {
+ tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_10G |
+ TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
+ }
+ } else {
+ tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_1G |
+ TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G);
+ }
+ iowrite32be(tmp, &regs->tx_fifo_sections);
+
+ /* clear all pending events and set-up interrupts */
+ iowrite32be(0xffffffff, &regs->ievent);
+ set_exception(regs, exceptions, true);
+
+ return 0;
+}
+
+static void set_dflts(struct memac_cfg *cfg)
+{
+ cfg->reset_on_init = false;
+ cfg->promiscuous_mode_enable = false;
+ cfg->pause_ignore = false;
+ cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
+ cfg->max_frame_length = DEFAULT_FRAME_LENGTH;
+ cfg->pause_quanta = DEFAULT_PAUSE_QUANTA;
+}
+
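+/* Fold the 48-bit MAC address into a 6-bit hash code: each consecutive
+ * group of eight address bits, starting from the least significant bit,
+ * is XOR-reduced to a single bit, and the six result bits are placed from
+ * bit 5 of the code downwards.
+ */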
+static u32 get_mac_addr_hash_code(u64 eth_addr)
+{
+ u64 mask1, mask2;
+ u32 xor_val = 0;
+ u8 i, j;
+
+ for (i = 0; i < 6; i++) {
+ mask1 = eth_addr & (u64)0x01;
+ eth_addr >>= 1;
+
+ for (j = 0; j < 7; j++) {
+ mask2 = eth_addr & (u64)0x01;
+ mask1 ^= mask2;
+ eth_addr >>= 1;
+ }
+
+ xor_val |= (mask1 << (5 - i));
+ }
+
+ return xor_val;
+}
+
+static void setup_sgmii_internal_phy(struct fman_mac *memac,
+ struct fixed_phy_status *fixed_link)
+{
+ u16 tmp_reg16;
+
+ /* SGMII mode */
+ tmp_reg16 = IF_MODE_SGMII_EN;
+ if (!fixed_link)
+ /* AN enable */
+ tmp_reg16 |= IF_MODE_USE_SGMII_AN;
+ else {
+ switch (fixed_link->speed) {
+ case 10:
+ /* For 10M: IF_MODE[SPEED_10M] = 0 */
+ break;
+ case 100:
+ tmp_reg16 |= IF_MODE_SGMII_SPEED_100M;
+ break;
+ case 1000: /* fallthrough */
+ default:
+ tmp_reg16 |= IF_MODE_SGMII_SPEED_1G;
+ break;
+ }
+ if (!fixed_link->duplex)
+ tmp_reg16 |= IF_MODE_SGMII_DUPLEX_HALF;
+ }
+ phy_write(memac->pcsphy, MDIO_SGMII_IF_MODE, tmp_reg16);
+
+ /* Device ability according to SGMII specification */
+ tmp_reg16 = MDIO_SGMII_DEV_ABIL_SGMII_MODE;
+ phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
+
+ /* Adjust link timer for SGMII -
+ * According to Cisco SGMII specification the timer should be 1.6 ms.
+ * The link_timer register is configured in units of the clock.
+ * - When running as 1G SGMII, Serdes clock is 125 MHz, so
+ * unit = 1 / (125*10^6 Hz) = 8 ns.
+ * 1.6 ms in units of 8 ns = 1.6ms / 8ns = 2*10^5 = 0x30d40
+ * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
+ * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
+ * 1.6 ms in units of 3.2 ns = 1.6ms / 3.2ns = 5*10^5 = 0x7a120.
+	 * Since the 1G SGMII link_timer value would be too short for 2.5G
+	 * SGMII, we always program the 2.5G SGMII value here.
+ */
+ phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H);
+ phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L);
+
+ if (!fixed_link)
+ /* Restart AN */
+ tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
+ else
+ /* AN disabled */
+ tmp_reg16 = SGMII_CR_DEF_VAL & ~SGMII_CR_AN_EN;
+ phy_write(memac->pcsphy, 0x0, tmp_reg16);
+}
+
+static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac)
+{
+ u16 tmp_reg16;
+
+ /* AN Device capability */
+ tmp_reg16 = MDIO_SGMII_DEV_ABIL_BASEX_MODE;
+ phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
+
+ /* Adjust link timer for SGMII -
+ * For Serdes 1000BaseX auto-negotiation the timer should be 10 ms.
+ * The link_timer register is configured in units of the clock.
+ * - When running as 1G SGMII, Serdes clock is 125 MHz, so
+ * unit = 1 / (125*10^6 Hz) = 8 ns.
+ * 10 ms in units of 8 ns = 10ms / 8ns = 1250000 = 0x1312d0
+ * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
+ * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
+ * 10 ms in units of 3.2 ns = 10ms / 3.2ns = 3125000 = 0x2faf08.
+	 * Since the 1G SGMII link_timer value would be too short for 2.5G
+	 * SGMII, we always program the 2.5G SGMII value here.
+ */
+ phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H_BASEX);
+ phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L_BASEX);
+
+ /* Restart AN */
+ tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
+ phy_write(memac->pcsphy, 0x0, tmp_reg16);
+}
+
+static int check_init_parameters(struct fman_mac *memac)
+{
+ if (memac->addr == 0) {
+ pr_err("Ethernet MAC must have a valid MAC address\n");
+ return -EINVAL;
+ }
+ if (!memac->exception_cb) {
+ pr_err("Uninitialized exception handler\n");
+ return -EINVAL;
+ }
+ if (!memac->event_cb) {
+		pr_warn("Uninitialized event handler\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_exception_flag(enum fman_mac_exceptions exception)
+{
+ u32 bit_mask;
+
+ switch (exception) {
+ case FM_MAC_EX_10G_TX_ECC_ER:
+ bit_mask = MEMAC_IMASK_TECC_ER;
+ break;
+ case FM_MAC_EX_10G_RX_ECC_ER:
+ bit_mask = MEMAC_IMASK_RECC_ER;
+ break;
+ case FM_MAC_EX_TS_FIFO_ECC_ERR:
+ bit_mask = MEMAC_IMASK_TSECC_ER;
+ break;
+ case FM_MAC_EX_MAGIC_PACKET_INDICATION:
+ bit_mask = MEMAC_IMASK_MGI;
+ break;
+ default:
+ bit_mask = 0;
+ break;
+ }
+
+ return bit_mask;
+}
+
+static void memac_err_exception(void *handle)
+{
+ struct fman_mac *memac = (struct fman_mac *)handle;
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 event, imask;
+
+ event = ioread32be(&regs->ievent);
+ imask = ioread32be(&regs->imask);
+
+	/* The imask holds both error and notification/event bits.
+	 * Keep only the error bits that imask has enabled; the imask error
+	 * bits sit 16 bits above their corresponding ievent bits - hence
+	 * the >> 16 (e.g. MEMAC_IMASK_TECC_ER >> 16 == MEMAC_IEVNT_TX_ECC_ER).
+	 */
+ event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16);
+
+ iowrite32be(event, &regs->ievent);
+
+ if (event & MEMAC_IEVNT_TS_ECC_ER)
+ memac->exception_cb(memac->dev_id, FM_MAC_EX_TS_FIFO_ECC_ERR);
+ if (event & MEMAC_IEVNT_TX_ECC_ER)
+ memac->exception_cb(memac->dev_id, FM_MAC_EX_10G_TX_ECC_ER);
+ if (event & MEMAC_IEVNT_RX_ECC_ER)
+ memac->exception_cb(memac->dev_id, FM_MAC_EX_10G_RX_ECC_ER);
+}
+
+static void memac_exception(void *handle)
+{
+ struct fman_mac *memac = (struct fman_mac *)handle;
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 event, imask;
+
+ event = ioread32be(&regs->ievent);
+ imask = ioread32be(&regs->imask);
+
+	/* The imask holds both error and notification/event bits.
+	 * Keep only the error bits that imask has enabled; the imask error
+	 * bits sit 16 bits above their corresponding ievent bits - hence
+	 * the >> 16 (e.g. MEMAC_IMASK_TECC_ER >> 16 == MEMAC_IEVNT_TX_ECC_ER).
+	 */
+ event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16);
+
+ iowrite32be(event, &regs->ievent);
+
+ if (event & MEMAC_IEVNT_MGI)
+ memac->exception_cb(memac->dev_id,
+ FM_MAC_EX_MAGIC_PACKET_INDICATION);
+}
+
+static void free_init_resources(struct fman_mac *memac)
+{
+ fman_unregister_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
+ FMAN_INTR_TYPE_ERR);
+
+ fman_unregister_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
+ FMAN_INTR_TYPE_NORMAL);
+
+ /* release the driver's group hash table */
+ free_hash_table(memac->multicast_addr_hash);
+ memac->multicast_addr_hash = NULL;
+
+ /* release the driver's individual hash table */
+ free_hash_table(memac->unicast_addr_hash);
+ memac->unicast_addr_hash = NULL;
+}
+
+static bool is_init_done(struct memac_cfg *memac_drv_params)
+{
+	/* memac_init() frees the driver parameters when it completes, so a
+	 * NULL pointer here means initialization is done
+	 */
+ if (!memac_drv_params)
+ return true;
+
+ return false;
+}
+
+int memac_enable(struct fman_mac *memac, enum comm_mode mode)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (mode & COMM_MODE_RX)
+ tmp |= CMD_CFG_RX_EN;
+ if (mode & COMM_MODE_TX)
+ tmp |= CMD_CFG_TX_EN;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+int memac_disable(struct fman_mac *memac, enum comm_mode mode)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (mode & COMM_MODE_RX)
+ tmp &= ~CMD_CFG_RX_EN;
+ if (mode & COMM_MODE_TX)
+ tmp &= ~CMD_CFG_TX_EN;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (new_val)
+ tmp |= CMD_CFG_PROMIS_EN;
+ else
+ tmp &= ~CMD_CFG_PROMIS_EN;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+int memac_adjust_link(struct fman_mac *memac, u16 speed)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->if_mode);
+
+ /* Set full duplex */
+ tmp &= ~IF_MODE_HD;
+
+ if (memac->phy_if == PHY_INTERFACE_MODE_RGMII) {
+ /* Configure RGMII in manual mode */
+ tmp &= ~IF_MODE_RGMII_AUTO;
+ tmp &= ~IF_MODE_RGMII_SP_MASK;
+ /* Full duplex */
+ tmp |= IF_MODE_RGMII_FD;
+
+ switch (speed) {
+ case SPEED_1000:
+ tmp |= IF_MODE_RGMII_1000;
+ break;
+ case SPEED_100:
+ tmp |= IF_MODE_RGMII_100;
+ break;
+ case SPEED_10:
+ tmp |= IF_MODE_RGMII_10;
+ break;
+ default:
+ break;
+ }
+ }
+
+ iowrite32be(tmp, &regs->if_mode);
+
+ return 0;
+}
+
+int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val)
+{
+ if (is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ memac->memac_drv_param->max_frame_length = new_val;
+
+ return 0;
+}
+
+int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable)
+{
+ if (is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ memac->memac_drv_param->reset_on_init = enable;
+
+ return 0;
+}
+
+int memac_cfg_fixed_link(struct fman_mac *memac,
+ struct fixed_phy_status *fixed_link)
+{
+ if (is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ memac->memac_drv_param->fixed_link = fixed_link;
+
+ return 0;
+}
+
+int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
+ u16 pause_time, u16 thresh_time)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->tx_fifo_sections);
+
+ GET_TX_EMPTY_DEFAULT_VALUE(tmp);
+ iowrite32be(tmp, &regs->tx_fifo_sections);
+
+ tmp = ioread32be(&regs->command_config);
+ tmp &= ~CMD_CFG_PFC_MODE;
+ priority = 0;
+
+ iowrite32be(tmp, &regs->command_config);
+
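+	/* Each 32-bit quanta/threshold register packs two 16-bit class
+	 * values: even priorities occupy the low half-word and odd
+	 * priorities the high half-word (priority is forced to 0 above,
+	 * so class 0 of the first register is what gets programmed).
+	 */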
+ tmp = ioread32be(&regs->pause_quanta[priority / 2]);
+ if (priority % 2)
+ tmp &= CLXY_PAUSE_QUANTA_CLX_PQNT;
+ else
+ tmp &= CLXY_PAUSE_QUANTA_CLY_PQNT;
+ tmp |= ((u32)pause_time << (16 * (priority % 2)));
+ iowrite32be(tmp, &regs->pause_quanta[priority / 2]);
+
+ tmp = ioread32be(&regs->pause_thresh[priority / 2]);
+ if (priority % 2)
+ tmp &= CLXY_PAUSE_THRESH_CLX_QTH;
+ else
+ tmp &= CLXY_PAUSE_THRESH_CLY_QTH;
+ tmp |= ((u32)thresh_time << (16 * (priority % 2)));
+ iowrite32be(tmp, &regs->pause_thresh[priority / 2]);
+
+ return 0;
+}
+
+int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (en)
+ tmp &= ~CMD_CFG_PAUSE_IGNORE;
+ else
+ tmp |= CMD_CFG_PAUSE_IGNORE;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr)
+{
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ add_addr_in_paddr(memac->regs, (u8 *)(*enet_addr), 0);
+
+ return 0;
+}
+
+int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ struct eth_hash_entry *hash_entry;
+ u32 hash;
+ u64 addr;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
+ if (!(addr & GROUP_ADDRESS)) {
+ /* Unicast addresses not supported in hash */
+		pr_err("Unicast addresses are not supported in the hash table\n");
+ return -EINVAL;
+ }
+ hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
+
+ /* Create element to be added to the driver hash table */
+ hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+ if (!hash_entry)
+ return -ENOMEM;
+ hash_entry->addr = addr;
+ INIT_LIST_HEAD(&hash_entry->node);
+
+ list_add_tail(&hash_entry->node,
+ &memac->multicast_addr_hash->lsts[hash]);
+ iowrite32be(hash | HASH_CTRL_MCAST_EN, &regs->hashtable_ctrl);
+
+ return 0;
+}
+
+int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ struct eth_hash_entry *hash_entry = NULL;
+ struct list_head *pos;
+ u32 hash;
+ u64 addr;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
+ hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
+
+ list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) {
+ hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+ if (hash_entry->addr == addr) {
+ list_del_init(&hash_entry->node);
+ kfree(hash_entry);
+ break;
+ }
+ }
+ if (list_empty(&memac->multicast_addr_hash->lsts[hash]))
+ iowrite32be(hash & ~HASH_CTRL_MCAST_EN, &regs->hashtable_ctrl);
+
+ return 0;
+}
+
+int memac_set_exception(struct fman_mac *memac,
+ enum fman_mac_exceptions exception, bool enable)
+{
+ u32 bit_mask = 0;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ bit_mask = get_exception_flag(exception);
+ if (bit_mask) {
+ if (enable)
+ memac->exceptions |= bit_mask;
+ else
+ memac->exceptions &= ~bit_mask;
+ } else {
+ pr_err("Undefined exception\n");
+ return -EINVAL;
+ }
+ set_exception(memac->regs, bit_mask, enable);
+
+ return 0;
+}
+
+int memac_init(struct fman_mac *memac)
+{
+ struct memac_cfg *memac_drv_param;
+ u8 i;
+ enet_addr_t eth_addr;
+ bool slow_10g_if = false;
+ struct fixed_phy_status *fixed_link;
+ int err;
+ u32 reg32 = 0;
+
+ if (is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ err = check_init_parameters(memac);
+ if (err)
+ return err;
+
+ memac_drv_param = memac->memac_drv_param;
+
+ if (memac->fm_rev_info.major == 6 && memac->fm_rev_info.minor == 4)
+ slow_10g_if = true;
+
+ /* First, reset the MAC if desired. */
+ if (memac_drv_param->reset_on_init) {
+ err = reset(memac->regs);
+ if (err) {
+ pr_err("mEMAC reset failed\n");
+ return err;
+ }
+ }
+
+ /* MAC Address */
+ MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr);
+ add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0);
+
+ fixed_link = memac_drv_param->fixed_link;
+
+ init(memac->regs, memac->memac_drv_param, memac->phy_if,
+ memac->max_speed, slow_10g_if, memac->exceptions);
+
+ /* FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 errata workaround
+ * Exists only in FMan 6.0 and 6.3.
+ */
+ if ((memac->fm_rev_info.major == 6) &&
+ ((memac->fm_rev_info.minor == 0) ||
+ (memac->fm_rev_info.minor == 3))) {
+ /* MAC strips CRC from received frames - this workaround
+ * should decrease the likelihood of bug appearance
+ */
+ reg32 = ioread32be(&memac->regs->command_config);
+ reg32 &= ~CMD_CFG_CRC_FWD;
+ iowrite32be(reg32, &memac->regs->command_config);
+ }
+
+ if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ /* Configure internal SGMII PHY */
+ if (memac->basex_if)
+ setup_sgmii_internal_phy_base_x(memac);
+ else
+ setup_sgmii_internal_phy(memac, fixed_link);
+ } else if (memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
+ /* Configure 4 internal SGMII PHYs */
+ for (i = 0; i < 4; i++) {
+			u8 qsgmii_phy_addr, phy_addr;
+ /* QSGMII PHY address occupies 3 upper bits of 5-bit
+ * phy_address; the lower 2 bits are used to extend
+ * register address space and access each one of 4
+ * ports inside QSGMII.
+ */
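+			/* For example (illustrative): a PCS PHY at address
+			 * 0x02 exposes its four QSGMII ports at addresses
+			 * 0x08..0x0B.
+			 */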
+ phy_addr = memac->pcsphy->addr;
+			qsgmii_phy_addr = (u8)((phy_addr << 2) | i);
+			memac->pcsphy->addr = qsgmii_phy_addr;
+ if (memac->basex_if)
+ setup_sgmii_internal_phy_base_x(memac);
+ else
+ setup_sgmii_internal_phy(memac, fixed_link);
+
+ memac->pcsphy->addr = phy_addr;
+ }
+ }
+
+ /* Max Frame Length */
+ err = fman_set_mac_max_frame(memac->fm, memac->mac_id,
+ memac_drv_param->max_frame_length);
+ if (err) {
+		pr_err("setting MAC max frame length failed\n");
+ return err;
+ }
+
+ memac->multicast_addr_hash = alloc_hash_table(HASH_TABLE_SIZE);
+ if (!memac->multicast_addr_hash) {
+ free_init_resources(memac);
+		pr_err("multicast hash table allocation failed\n");
+ return -ENOMEM;
+ }
+
+ memac->unicast_addr_hash = alloc_hash_table(HASH_TABLE_SIZE);
+ if (!memac->unicast_addr_hash) {
+ free_init_resources(memac);
+		pr_err("unicast hash table allocation failed\n");
+ return -ENOMEM;
+ }
+
+ fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
+ FMAN_INTR_TYPE_ERR, memac_err_exception, memac);
+
+ fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
+ FMAN_INTR_TYPE_NORMAL, memac_exception, memac);
+
+ kfree(memac_drv_param);
+ memac->memac_drv_param = NULL;
+
+ return 0;
+}
+
+int memac_free(struct fman_mac *memac)
+{
+ free_init_resources(memac);
+
+ kfree(memac->memac_drv_param);
+ kfree(memac);
+
+ return 0;
+}
+
+struct fman_mac *memac_config(struct fman_mac_params *params)
+{
+ struct fman_mac *memac;
+ struct memac_cfg *memac_drv_param;
+ void __iomem *base_addr;
+
+ base_addr = params->base_addr;
+ /* allocate memory for the m_emac data structure */
+ memac = kzalloc(sizeof(*memac), GFP_KERNEL);
+ if (!memac)
+ return NULL;
+
+ /* allocate memory for the m_emac driver parameters data structure */
+ memac_drv_param = kzalloc(sizeof(*memac_drv_param), GFP_KERNEL);
+ if (!memac_drv_param) {
+ memac_free(memac);
+ return NULL;
+ }
+
+ /* Plant parameter structure pointer */
+ memac->memac_drv_param = memac_drv_param;
+
+ set_dflts(memac_drv_param);
+
+ memac->addr = ENET_ADDR_TO_UINT64(params->addr);
+
+ memac->regs = base_addr;
+ memac->max_speed = params->max_speed;
+ memac->phy_if = params->phy_if;
+ memac->mac_id = params->mac_id;
+ memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER |
+ MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI);
+ memac->exception_cb = params->exception_cb;
+ memac->event_cb = params->event_cb;
+ memac->dev_id = params->dev_id;
+ memac->fm = params->fm;
+ memac->basex_if = params->basex_if;
+
+ /* Save FMan revision */
+ fman_get_revision(memac->fm, &memac->fm_rev_info);
+
+ if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ if (!params->internal_phy_node) {
+ pr_err("PCS PHY node is not available\n");
+ memac_free(memac);
+ return NULL;
+ }
+
+ memac->pcsphy = of_phy_find_device(params->internal_phy_node);
+ if (!memac->pcsphy) {
+ pr_err("of_phy_find_device (PCS PHY) failed\n");
+ memac_free(memac);
+ return NULL;
+ }
+ }
+
+ return memac;
+}
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
new file mode 100644
index 0000000..173d8e0
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MEMAC_H
+#define __MEMAC_H
+
+#include "fman_mac.h"
+
+#include <linux/netdevice.h>
+
+struct fman_mac *memac_config(struct fman_mac_params *params);
+int memac_set_promiscuous(struct fman_mac *memac, bool new_val);
+int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr);
+int memac_adjust_link(struct fman_mac *memac, u16 speed);
+int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val);
+int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable);
+int memac_cfg_fixed_link(struct fman_mac *memac,
+ struct fixed_phy_status *fixed_link);
+int memac_enable(struct fman_mac *memac, enum comm_mode mode);
+int memac_disable(struct fman_mac *memac, enum comm_mode mode);
+int memac_init(struct fman_mac *memac);
+int memac_free(struct fman_mac *memac);
+int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en);
+int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
+ u16 pause_time, u16 thresh_time);
+int memac_set_exception(struct fman_mac *memac,
+ enum fman_mac_exceptions exception, bool enable);
+int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
+int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
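+
+/* Expected call sequence (illustrative sketch): memac_config() allocates
+ * the MAC and its config area, the memac_cfg_*() calls adjust defaults
+ * before init, memac_init() commits the configuration to hardware (and
+ * frees the config area), and memac_enable() turns on the Rx/Tx paths:
+ *
+ *	struct fman_mac *m = memac_config(&params);
+ *
+ *	if (m && !memac_init(m))
+ *		memac_enable(m, COMM_MODE_RX_AND_TX);
+ */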
+
+#endif /* __MEMAC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
new file mode 100644
index 0000000..4eb0e9a
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "fman_muram.h"
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/genalloc.h>
+
+struct muram_info {
+ struct gen_pool *pool;
+ void __iomem *vbase;
+ size_t size;
+ phys_addr_t pbase;
+};
+
+static unsigned long fman_muram_vbase_to_offset(struct muram_info *muram,
+ unsigned long vaddr)
+{
+ return vaddr - (unsigned long)muram->vbase;
+}
+
+/**
+ * fman_muram_init
+ * @base: Pointer to base of memory mapped FM-MURAM.
+ * @size: Size of the FM-MURAM partition.
+ *
+ * Creates a partition in the MURAM.
+ * The routine returns a pointer to the MURAM partition.
+ * This pointer must be passed to all other FM-MURAM function calls.
+ * No actual initialization or configuration of FM-MURAM hardware is done by
+ * this routine.
+ *
+ * Return: pointer to the FM-MURAM object, or NULL on failure.
+ */
+struct muram_info *fman_muram_init(phys_addr_t base, size_t size)
+{
+ struct muram_info *muram;
+ void __iomem *vaddr;
+ int ret;
+
+ muram = kzalloc(sizeof(*muram), GFP_KERNEL);
+ if (!muram)
+ return NULL;
+
+ muram->pool = gen_pool_create(ilog2(64), -1);
+ if (!muram->pool) {
+ pr_err("%s(): MURAM pool create failed\n", __func__);
+ goto muram_free;
+ }
+
+ vaddr = ioremap(base, size);
+ if (!vaddr) {
+ pr_err("%s(): MURAM ioremap failed\n", __func__);
+ goto pool_destroy;
+ }
+
+ ret = gen_pool_add_virt(muram->pool, (unsigned long)vaddr,
+ base, size, -1);
+ if (ret < 0) {
+ pr_err("%s(): MURAM pool add failed\n", __func__);
+ iounmap(vaddr);
+ goto pool_destroy;
+ }
+
+ memset_io(vaddr, 0, (int)size);
+
+ muram->vbase = vaddr;
+ muram->pbase = base;
+ return muram;
+
+pool_destroy:
+ gen_pool_destroy(muram->pool);
+muram_free:
+ kfree(muram);
+ return NULL;
+}
+
+/**
+ * fman_muram_offset_to_vbase
+ * @muram: FM-MURAM module pointer.
+ * @offset: the offset of the memory block
+ *
+ * Gives the address of the memory region from specific offset
+ *
+ * Return: The address of the memory block
+ */
+unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
+ unsigned long offset)
+{
+ return offset + (unsigned long)muram->vbase;
+}
+
+/**
+ * fman_muram_alloc
+ * @muram: FM-MURAM module pointer.
+ * @size: Size of the memory to be allocated.
+ *
+ * Allocate some memory from the FM-MURAM partition.
+ *
+ * Return: offset of the allocated memory within the MURAM partition;
+ * -ENOMEM on failure.
+ */
+int fman_muram_alloc(struct muram_info *muram, size_t size)
+{
+ unsigned long vaddr;
+
+ vaddr = gen_pool_alloc(muram->pool, size);
+ if (!vaddr)
+ return -ENOMEM;
+
+ memset_io((void __iomem *)vaddr, 0, size);
+
+ return fman_muram_vbase_to_offset(muram, vaddr);
+}
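+
+/* Typical usage (illustrative sketch): callers keep the returned offset
+ * and convert it to a virtual address only when they need to touch the
+ * memory:
+ *
+ *	int offset = fman_muram_alloc(muram, len);
+ *
+ *	if (offset >= 0) {
+ *		void __iomem *p = (void __iomem *)
+ *			fman_muram_offset_to_vbase(muram, offset);
+ *		...
+ *		fman_muram_free_mem(muram, offset, len);
+ *	}
+ */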
+
+/**
+ * fman_muram_free_mem
+ * @muram: FM-MURAM module pointer.
+ * @offset: offset of the memory region to be freed.
+ * @size: size of the memory to be freed.
+ *
+ * Free allocated memory from the FM-MURAM partition.
+ */
+void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size)
+{
+ unsigned long addr = fman_muram_offset_to_vbase(muram, offset);
+
+ gen_pool_free(muram->pool, addr, size);
+}
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
new file mode 100644
index 0000000..dbf0af9
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FM_MURAM_EXT
+#define __FM_MURAM_EXT
+
+#include <linux/types.h>
+
+#define FM_MURAM_INVALID_ALLOCATION -1
+
+/* Structure for FM MURAM information */
+struct muram_info;
+
+struct muram_info *fman_muram_init(phys_addr_t base, size_t size);
+
+unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
+ unsigned long offset);
+
+int fman_muram_alloc(struct muram_info *muram, size_t size);
+
+void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size);
+
+#endif /* __FM_MURAM_EXT */
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
new file mode 100644
index 0000000..70c198d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -0,0 +1,1778 @@
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "fman_port.h"
+#include "fman.h"
+#include "fman_sp.h"
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/libfdt_env.h>
+
+/* Queue ID */
+#define DFLT_FQ_ID 0x00FFFFFF
+
+/* General defines */
+#define PORT_BMI_FIFO_UNITS 0x100
+
+#define MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) \
+ min((u32)bmi_max_fifo_size, (u32)1024 * FMAN_BMI_FIFO_UNITS)
+
+#define PORT_CG_MAP_NUM 8
+#define PORT_PRS_RESULT_WORDS_NUM 8
+#define PORT_IC_OFFSET_UNITS 0x10
+
+#define MIN_EXT_BUF_SIZE 64
+
+#define BMI_PORT_REGS_OFFSET 0
+#define QMI_PORT_REGS_OFFSET 0x400
+
+/* Default values */
+#define DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN \
+ DFLT_FM_SP_BUFFER_PREFIX_CONTEXT_DATA_ALIGN
+
+#define DFLT_PORT_CUT_BYTES_FROM_END 4
+
+#define DFLT_PORT_ERRORS_TO_DISCARD FM_PORT_FRM_ERR_CLS_DISCARD
+#define DFLT_PORT_MAX_FRAME_LENGTH 9600
+
+#define DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(bmi_max_fifo_size) \
+ MAX_PORT_FIFO_SIZE(bmi_max_fifo_size)
+
+#define DFLT_PORT_RX_FIFO_THRESHOLD(major, bmi_max_fifo_size) \
+ (major == 6 ? \
+ MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) : \
+	 (MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) * 3 / 4))
+
+#define DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS 0
+
+/* QMI defines */
+#define QMI_DEQ_CFG_SUBPORTAL_MASK 0x1f
+
+#define QMI_PORT_CFG_EN 0x80000000
+#define QMI_PORT_STATUS_DEQ_FD_BSY 0x20000000
+
+#define QMI_DEQ_CFG_PRI 0x80000000
+#define QMI_DEQ_CFG_TYPE1 0x10000000
+#define QMI_DEQ_CFG_TYPE2 0x20000000
+#define QMI_DEQ_CFG_TYPE3 0x30000000
+#define QMI_DEQ_CFG_PREFETCH_PARTIAL 0x01000000
+#define QMI_DEQ_CFG_PREFETCH_FULL 0x03000000
+#define QMI_DEQ_CFG_SP_MASK 0xf
+#define QMI_DEQ_CFG_SP_SHIFT 20
+
+#define QMI_BYTE_COUNT_LEVEL_CONTROL(_type) \
+ (_type == FMAN_PORT_TYPE_TX ? 0x1400 : 0x400)
+
+/* BMI defines */
+#define BMI_EBD_EN 0x80000000
+
+#define BMI_PORT_CFG_EN 0x80000000
+
+#define BMI_PORT_STATUS_BSY 0x80000000
+
+#define BMI_DMA_ATTR_SWP_SHIFT FMAN_SP_DMA_ATTR_SWP_SHIFT
+#define BMI_DMA_ATTR_WRITE_OPTIMIZE FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE
+
+#define BMI_RX_FIFO_PRI_ELEVATION_SHIFT 16
+#define BMI_RX_FIFO_THRESHOLD_ETHE 0x80000000
+
+#define BMI_FRAME_END_CS_IGNORE_SHIFT 24
+#define BMI_FRAME_END_CS_IGNORE_MASK 0x0000001f
+
+#define BMI_RX_FRAME_END_CUT_SHIFT 16
+#define BMI_RX_FRAME_END_CUT_MASK 0x0000001f
+
+#define BMI_IC_TO_EXT_SHIFT FMAN_SP_IC_TO_EXT_SHIFT
+#define BMI_IC_TO_EXT_MASK 0x0000001f
+#define BMI_IC_FROM_INT_SHIFT FMAN_SP_IC_FROM_INT_SHIFT
+#define BMI_IC_FROM_INT_MASK 0x0000000f
+#define BMI_IC_SIZE_MASK 0x0000001f
+
+#define BMI_INT_BUF_MARG_SHIFT 28
+#define BMI_INT_BUF_MARG_MASK 0x0000000f
+#define BMI_EXT_BUF_MARG_START_SHIFT FMAN_SP_EXT_BUF_MARG_START_SHIFT
+#define BMI_EXT_BUF_MARG_START_MASK 0x000001ff
+#define BMI_EXT_BUF_MARG_END_MASK 0x000001ff
+
+#define BMI_CMD_MR_LEAC 0x00200000
+#define BMI_CMD_MR_SLEAC 0x00100000
+#define BMI_CMD_MR_MA 0x00080000
+#define BMI_CMD_MR_DEAS 0x00040000
+#define BMI_CMD_RX_MR_DEF (BMI_CMD_MR_LEAC | \
+ BMI_CMD_MR_SLEAC | \
+ BMI_CMD_MR_MA | \
+ BMI_CMD_MR_DEAS)
+#define BMI_CMD_TX_MR_DEF 0
+
+#define BMI_CMD_ATTR_ORDER 0x80000000
+#define BMI_CMD_ATTR_SYNC 0x02000000
+#define BMI_CMD_ATTR_COLOR_SHIFT 26
+
+#define BMI_FIFO_PIPELINE_DEPTH_SHIFT 12
+#define BMI_FIFO_PIPELINE_DEPTH_MASK 0x0000000f
+#define BMI_NEXT_ENG_FD_BITS_SHIFT 24
+
+#define BMI_EXT_BUF_POOL_VALID FMAN_SP_EXT_BUF_POOL_VALID
+#define BMI_EXT_BUF_POOL_EN_COUNTER FMAN_SP_EXT_BUF_POOL_EN_COUNTER
+#define BMI_EXT_BUF_POOL_BACKUP FMAN_SP_EXT_BUF_POOL_BACKUP
+#define BMI_EXT_BUF_POOL_ID_SHIFT 16
+#define BMI_EXT_BUF_POOL_ID_MASK 0x003F0000
+#define BMI_POOL_DEP_NUM_OF_POOLS_SHIFT 16
+
+#define BMI_TX_FIFO_MIN_FILL_SHIFT 16
+
+#define BMI_PRIORITY_ELEVATION_LEVEL ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
+#define BMI_FIFO_THRESHOLD ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
+
+#define BMI_DEQUEUE_PIPELINE_DEPTH(_type, _speed) \
+ ((_type == FMAN_PORT_TYPE_TX && _speed == 10000) ? 4 : 1)
+
+#define RX_ERRS_TO_ENQ \
+ (FM_PORT_FRM_ERR_DMA | \
+ FM_PORT_FRM_ERR_PHYSICAL | \
+ FM_PORT_FRM_ERR_SIZE | \
+ FM_PORT_FRM_ERR_EXTRACTION | \
+ FM_PORT_FRM_ERR_NO_SCHEME | \
+ FM_PORT_FRM_ERR_PRS_TIMEOUT | \
+ FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | \
+ FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED | \
+ FM_PORT_FRM_ERR_PRS_HDR_ERR | \
+ FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW | \
+ FM_PORT_FRM_ERR_IPRE)
+
+/* NIA defines */
+#define NIA_ORDER_RESTOR 0x00800000
+#define NIA_ENG_BMI 0x00500000
+#define NIA_ENG_QMI_ENQ 0x00540000
+#define NIA_ENG_QMI_DEQ 0x00580000
+
+#define NIA_BMI_AC_ENQ_FRAME 0x00000002
+#define NIA_BMI_AC_TX_RELEASE 0x000002C0
+#define NIA_BMI_AC_RELEASE 0x000000C0
+#define NIA_BMI_AC_TX 0x00000274
+#define NIA_BMI_AC_FETCH_ALL_FRAME 0x0000020c
+
+/* Port IDs */
+#define TX_10G_PORT_BASE 0x30
+#define RX_10G_PORT_BASE 0x10
+
+/* BMI Rx port register map */
+struct fman_port_rx_bmi_regs {
+ u32 fmbm_rcfg; /* Rx Configuration */
+ u32 fmbm_rst; /* Rx Status */
+ u32 fmbm_rda; /* Rx DMA attributes */
+ u32 fmbm_rfp; /* Rx FIFO Parameters */
+ u32 fmbm_rfed; /* Rx Frame End Data */
+ u32 fmbm_ricp; /* Rx Internal Context Parameters */
+ u32 fmbm_rim; /* Rx Internal Buffer Margins */
+ u32 fmbm_rebm; /* Rx External Buffer Margins */
+ u32 fmbm_rfne; /* Rx Frame Next Engine */
+ u32 fmbm_rfca; /* Rx Frame Command Attributes. */
+ u32 fmbm_rfpne; /* Rx Frame Parser Next Engine */
+ u32 fmbm_rpso; /* Rx Parse Start Offset */
+ u32 fmbm_rpp; /* Rx Policer Profile */
+ u32 fmbm_rccb; /* Rx Coarse Classification Base */
+ u32 fmbm_reth; /* Rx Excessive Threshold */
+ u32 reserved003c[1]; /* (0x03C 0x03F) */
+ u32 fmbm_rprai[PORT_PRS_RESULT_WORDS_NUM];
+ /* Rx Parse Results Array Init */
+ u32 fmbm_rfqid; /* Rx Frame Queue ID */
+ u32 fmbm_refqid; /* Rx Error Frame Queue ID */
+ u32 fmbm_rfsdm; /* Rx Frame Status Discard Mask */
+ u32 fmbm_rfsem; /* Rx Frame Status Error Mask */
+ u32 fmbm_rfene; /* Rx Frame Enqueue Next Engine */
+ u32 reserved0074[0x2]; /* (0x074-0x07C) */
+ u32 fmbm_rcmne; /* Rx Frame Continuous Mode Next Engine */
+	u32 reserved0080[0x20];	/* (0x080-0x0FF) */
+	u32 fmbm_ebmpi[FMAN_PORT_MAX_EXT_POOLS_NUM];
+					/* Buffer Manager pool Information */
+	u32 fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM];	/* Allocate Counter */
+	u32 reserved0130[8];	/* 0x130/0x140 - 0x15F reserved */
+ u32 fmbm_rcgm[PORT_CG_MAP_NUM]; /* Congestion Group Map */
+ u32 fmbm_mpd; /* BM Pool Depletion */
+	u32 reserved0184[0x1F];	/* (0x184-0x1FF) */
+ u32 fmbm_rstc; /* Rx Statistics Counters */
+ u32 fmbm_rfrc; /* Rx Frame Counter */
+ u32 fmbm_rfbc; /* Rx Bad Frames Counter */
+ u32 fmbm_rlfc; /* Rx Large Frames Counter */
+ u32 fmbm_rffc; /* Rx Filter Frames Counter */
+ u32 fmbm_rfdc; /* Rx Frame Discard Counter */
+ u32 fmbm_rfldec; /* Rx Frames List DMA Error Counter */
+	u32 fmbm_rodc;		/* Rx Out of Buffers Discard Counter */
+	u32 fmbm_rbdc;		/* Rx Buffers Deallocate Counter */
+	u32 fmbm_rpec;		/* Rx Prepare to enqueue Counter */
+	u32 reserved0224[0x16];	/* (0x224-0x27F) */
+ u32 fmbm_rpc; /* Rx Performance Counters */
+ u32 fmbm_rpcp; /* Rx Performance Count Parameters */
+ u32 fmbm_rccn; /* Rx Cycle Counter */
+ u32 fmbm_rtuc; /* Rx Tasks Utilization Counter */
+ u32 fmbm_rrquc; /* Rx Receive Queue Utilization cntr */
+ u32 fmbm_rduc; /* Rx DMA Utilization Counter */
+ u32 fmbm_rfuc; /* Rx FIFO Utilization Counter */
+ u32 fmbm_rpac; /* Rx Pause Activation Counter */
+	u32 reserved02a0[0x18];	/* (0x2A0-0x2FF) */
+ u32 fmbm_rdcfg[0x3]; /* Rx Debug Configuration */
+ u32 fmbm_rgpr; /* Rx General Purpose Register */
+ u32 reserved0310[0x3a];
+};
+
+/* BMI Tx port register map */
+struct fman_port_tx_bmi_regs {
+ u32 fmbm_tcfg; /* Tx Configuration */
+ u32 fmbm_tst; /* Tx Status */
+ u32 fmbm_tda; /* Tx DMA attributes */
+ u32 fmbm_tfp; /* Tx FIFO Parameters */
+ u32 fmbm_tfed; /* Tx Frame End Data */
+ u32 fmbm_ticp; /* Tx Internal Context Parameters */
+ u32 fmbm_tfdne; /* Tx Frame Dequeue Next Engine. */
+ u32 fmbm_tfca; /* Tx Frame Command attribute. */
+ u32 fmbm_tcfqid; /* Tx Confirmation Frame Queue ID. */
+ u32 fmbm_tefqid; /* Tx Frame Error Queue ID */
+ u32 fmbm_tfene; /* Tx Frame Enqueue Next Engine */
+ u32 fmbm_trlmts; /* Tx Rate Limiter Scale */
+ u32 fmbm_trlmt; /* Tx Rate Limiter */
+ u32 reserved0034[0x0e]; /* (0x034-0x6c) */
+ u32 fmbm_tccb; /* Tx Coarse Classification base */
+ u32 fmbm_tfne; /* Tx Frame Next Engine */
+ u32 fmbm_tpfcm[0x02];
+ /* Tx Priority based Flow Control (PFC) Mapping */
+ u32 fmbm_tcmne; /* Tx Frame Continuous Mode Next Engine */
+ u32 reserved0080[0x60]; /* (0x080-0x200) */
+ u32 fmbm_tstc; /* Tx Statistics Counters */
+ u32 fmbm_tfrc; /* Tx Frame Counter */
+ u32 fmbm_tfdc; /* Tx Frames Discard Counter */
+	u32 fmbm_tfledc;	/* Tx Frame Length Error Discard Counter */
+	u32 fmbm_tfufdc;	/* Tx Frame Unsupported Format Discard Counter */
+ u32 fmbm_tbdc; /* Tx Buffers Deallocate Counter */
+ u32 reserved0218[0x1A]; /* (0x218-0x280) */
+ u32 fmbm_tpc; /* Tx Performance Counters */
+ u32 fmbm_tpcp; /* Tx Performance Count Parameters */
+ u32 fmbm_tccn; /* Tx Cycle Counter */
+ u32 fmbm_ttuc; /* Tx Tasks Utilization Counter */
+ u32 fmbm_ttcquc; /* Tx Transmit conf Q util Counter */
+ u32 fmbm_tduc; /* Tx DMA Utilization Counter */
+ u32 fmbm_tfuc; /* Tx FIFO Utilization Counter */
+ u32 reserved029c[16]; /* (0x29C-0x2FF) */
+ u32 fmbm_tdcfg[0x3]; /* Tx Debug Configuration */
+ u32 fmbm_tgpr; /* Tx General Purpose Register */
+ u32 reserved0310[0x3a]; /* (0x310-0x3FF) */
+};
+
+/* BMI port register map */
+union fman_port_bmi_regs {
+ struct fman_port_rx_bmi_regs rx;
+ struct fman_port_tx_bmi_regs tx;
+};
+
+/* QMI port register map */
+struct fman_port_qmi_regs {
+ u32 fmqm_pnc; /* PortID n Configuration Register */
+ u32 fmqm_pns; /* PortID n Status Register */
+ u32 fmqm_pnts; /* PortID n Task Status Register */
+ u32 reserved00c[4]; /* 0xn00C - 0xn01B */
+ u32 fmqm_pnen; /* PortID n Enqueue NIA Register */
+ u32 fmqm_pnetfc; /* PortID n Enq Total Frame Counter */
+ u32 reserved024[2]; /* 0xn024 - 0x02B */
+ u32 fmqm_pndn; /* PortID n Dequeue NIA Register */
+ u32 fmqm_pndc; /* PortID n Dequeue Config Register */
+ u32 fmqm_pndtfc; /* PortID n Dequeue tot Frame cntr */
+ u32 fmqm_pndfdc; /* PortID n Dequeue FQID Dflt Cntr */
+ u32 fmqm_pndcc; /* PortID n Dequeue Confirm Counter */
+};
+
+/* QMI dequeue prefetch modes */
+enum fman_port_deq_prefetch {
+ FMAN_PORT_DEQ_NO_PREFETCH, /* No prefetch mode */
+ FMAN_PORT_DEQ_PART_PREFETCH, /* Partial prefetch mode */
+ FMAN_PORT_DEQ_FULL_PREFETCH /* Full prefetch mode */
+};
+
+/* A structure for defining FM port resources */
+struct fman_port_rsrc {
+ u32 num; /* Committed required resource */
+ u32 extra; /* Extra (not committed) required resource */
+};
+
+enum fman_port_dma_swap {
+ FMAN_PORT_DMA_NO_SWAP, /* No swap, transfer data as is */
+ FMAN_PORT_DMA_SWAP_LE,
+ /* The transferred data should be swapped in PPC Little Endian mode */
+ FMAN_PORT_DMA_SWAP_BE
+ /* The transferred data should be swapped in Big Endian mode */
+};
+
+/* Default port color */
+enum fman_port_color {
+ FMAN_PORT_COLOR_GREEN, /* Default port color is green */
+ FMAN_PORT_COLOR_YELLOW, /* Default port color is yellow */
+ FMAN_PORT_COLOR_RED, /* Default port color is red */
+ FMAN_PORT_COLOR_OVERRIDE /* Ignore color */
+};
+
+/* QMI dequeue from the SP channel - types */
+enum fman_port_deq_type {
+ FMAN_PORT_DEQ_BY_PRI,
+ /* Priority precedence and Intra-Class scheduling */
+ FMAN_PORT_DEQ_ACTIVE_FQ,
+ /* Active FQ precedence and Intra-Class scheduling */
+ FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS
+ /* Active FQ precedence and override Intra-Class scheduling */
+};
+
+/* External buffer pools configuration */
+struct fman_port_bpools {
+ u8 count; /* Num of pools to set up */
+ bool counters_enable; /* Enable allocate counters */
+ u8 grp_bp_depleted_num;
+ /* Number of depleted pools - if reached the BMI indicates
+ * the MAC to send a pause frame
+ */
+ struct {
+ u8 bpid; /* BM pool ID */
+ u16 size;
+ /* Pool's size - must be in ascending order */
+ bool is_backup;
+ /* If this is a backup pool */
+ bool grp_bp_depleted;
+ /* Consider this buffer in multiple pools depletion criteria */
+ bool single_bp_depleted;
+ /* Consider this buffer in single pool depletion criteria */
+ } bpool[FMAN_PORT_MAX_EXT_POOLS_NUM];
+};
+
+struct fman_port_cfg {
+ u32 dflt_fqid;
+ u32 err_fqid;
+ u8 deq_sp;
+ bool deq_high_priority;
+ enum fman_port_deq_type deq_type;
+ enum fman_port_deq_prefetch deq_prefetch_option;
+ u16 deq_byte_cnt;
+ u8 cheksum_last_bytes_ignore;
+ u8 rx_cut_end_bytes;
+ struct fman_buf_pool_depletion buf_pool_depletion;
+ struct fman_ext_pools ext_buf_pools;
+ u32 tx_fifo_min_level;
+ u32 tx_fifo_low_comf_level;
+ u32 rx_pri_elevation;
+ u32 rx_fifo_thr;
+ struct fman_sp_buf_margins buf_margins;
+ u32 int_buf_start_margin;
+ struct fman_sp_int_context_data_copy int_context;
+ u32 discard_mask;
+ u32 err_mask;
+ struct fman_buffer_prefix_content buffer_prefix_content;
+ bool dont_release_buf;
+
+ u8 rx_fd_bits;
+ u32 tx_fifo_deq_pipeline_depth;
+ bool errata_A006320;
+ bool excessive_threshold_register;
+ bool fmbm_tfne_has_features;
+
+ enum fman_port_dma_swap dma_swap_data;
+ enum fman_port_color color;
+};
+
+struct fman_port_rx_pools_params {
+ u8 num_of_pools;
+ u16 second_largest_buf_size;
+ u16 largest_buf_size;
+};
+
+struct fman_port_dts_params {
+ void __iomem *base_addr; /* FMan port virtual memory */
+ enum fman_port_type type; /* Port type */
+ u16 speed; /* Port speed */
+ u8 id; /* HW Port Id */
+ u32 qman_channel_id; /* QMan channel id (non RX only) */
+ struct fman *fman; /* FMan Handle */
+};
+
+struct fman_port {
+ void *fm;
+ struct device *dev;
+ struct fman_rev_info rev_info;
+ u8 port_id;
+ enum fman_port_type port_type;
+ u16 port_speed;
+
+ union fman_port_bmi_regs __iomem *bmi_regs;
+ struct fman_port_qmi_regs __iomem *qmi_regs;
+
+ struct fman_sp_buffer_offsets buffer_offsets;
+
+ u8 internal_buf_offset;
+ struct fman_ext_pools ext_buf_pools;
+
+ u16 max_frame_length;
+ struct fman_port_rsrc open_dmas;
+ struct fman_port_rsrc tasks;
+ struct fman_port_rsrc fifo_bufs;
+ struct fman_port_rx_pools_params rx_pools_params;
+
+ struct fman_port_cfg *cfg;
+ struct fman_port_dts_params dts_params;
+
+ u8 ext_pools_num;
+ u32 max_port_fifo_size;
+ u32 max_num_of_ext_pools;
+ u32 max_num_of_sub_portals;
+ u32 bm_max_num_of_pools;
+};
+
+static int init_bmi_rx(struct fman_port *port)
+{
+ struct fman_port_rx_bmi_regs __iomem *regs = &port->bmi_regs->rx;
+ struct fman_port_cfg *cfg = port->cfg;
+ u32 tmp;
+
+ /* DMA attributes */
+ tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
+ /* Enable write optimization */
+ tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE;
+ iowrite32be(tmp, &regs->fmbm_rda);
+
+ /* Rx FIFO parameters */
+ tmp = (cfg->rx_pri_elevation / PORT_BMI_FIFO_UNITS - 1) <<
+ BMI_RX_FIFO_PRI_ELEVATION_SHIFT;
+ tmp |= cfg->rx_fifo_thr / PORT_BMI_FIFO_UNITS - 1;
+ iowrite32be(tmp, &regs->fmbm_rfp);
+
+ if (cfg->excessive_threshold_register)
+ /* always allow access to the extra resources */
+ iowrite32be(BMI_RX_FIFO_THRESHOLD_ETHE, &regs->fmbm_reth);
+
+ /* Frame end data */
+ tmp = (cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) <<
+ BMI_FRAME_END_CS_IGNORE_SHIFT;
+ tmp |= (cfg->rx_cut_end_bytes & BMI_RX_FRAME_END_CUT_MASK) <<
+ BMI_RX_FRAME_END_CUT_SHIFT;
+ if (cfg->errata_A006320)
+ tmp &= 0xffe0ffff;
+ iowrite32be(tmp, &regs->fmbm_rfed);
+
+ /* Internal context parameters */
+ tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) &
+ BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT;
+ tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) &
+ BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT;
+ tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) &
+ BMI_IC_SIZE_MASK;
+ iowrite32be(tmp, &regs->fmbm_ricp);
+
+ /* Internal buffer offset */
+ tmp = ((cfg->int_buf_start_margin / PORT_IC_OFFSET_UNITS) &
+ BMI_INT_BUF_MARG_MASK) << BMI_INT_BUF_MARG_SHIFT;
+ iowrite32be(tmp, &regs->fmbm_rim);
+
+ /* External buffer margins */
+ tmp = (cfg->buf_margins.start_margins & BMI_EXT_BUF_MARG_START_MASK) <<
+ BMI_EXT_BUF_MARG_START_SHIFT;
+ tmp |= cfg->buf_margins.end_margins & BMI_EXT_BUF_MARG_END_MASK;
+ iowrite32be(tmp, &regs->fmbm_rebm);
+
+ /* Frame attributes */
+ tmp = BMI_CMD_RX_MR_DEF;
+ tmp |= BMI_CMD_ATTR_ORDER;
+ tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
+ /* Synchronization request */
+ tmp |= BMI_CMD_ATTR_SYNC;
+
+ iowrite32be(tmp, &regs->fmbm_rfca);
+
+ /* NIA */
+ tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
+
+ tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
+ iowrite32be(tmp, &regs->fmbm_rfne);
+
+ /* Enqueue NIA */
+ iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_rfene);
+
+ /* Default/error queues */
+ iowrite32be((cfg->dflt_fqid & DFLT_FQ_ID), &regs->fmbm_rfqid);
+ iowrite32be((cfg->err_fqid & DFLT_FQ_ID), &regs->fmbm_refqid);
+
+ /* Discard/error masks */
+ iowrite32be(cfg->discard_mask, &regs->fmbm_rfsdm);
+ iowrite32be(cfg->err_mask, &regs->fmbm_rfsem);
+
+ return 0;
+}
+
+static int init_bmi_tx(struct fman_port *port)
+{
+ struct fman_port_tx_bmi_regs __iomem *regs = &port->bmi_regs->tx;
+ struct fman_port_cfg *cfg = port->cfg;
+ u32 tmp;
+
+ /* Tx Configuration register */
+ tmp = 0;
+ iowrite32be(tmp, &regs->fmbm_tcfg);
+
+ /* DMA attributes */
+ tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
+ iowrite32be(tmp, &regs->fmbm_tda);
+
+ /* Tx FIFO parameters */
+ tmp = (cfg->tx_fifo_min_level / PORT_BMI_FIFO_UNITS) <<
+ BMI_TX_FIFO_MIN_FILL_SHIFT;
+ tmp |= ((cfg->tx_fifo_deq_pipeline_depth - 1) &
+ BMI_FIFO_PIPELINE_DEPTH_MASK) << BMI_FIFO_PIPELINE_DEPTH_SHIFT;
+ tmp |= (cfg->tx_fifo_low_comf_level / PORT_BMI_FIFO_UNITS) - 1;
+ iowrite32be(tmp, &regs->fmbm_tfp);
+
+ /* Frame end data */
+ tmp = (cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) <<
+ BMI_FRAME_END_CS_IGNORE_SHIFT;
+ iowrite32be(tmp, &regs->fmbm_tfed);
+
+ /* Internal context parameters */
+ tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) &
+ BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT;
+ tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) &
+ BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT;
+ tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) &
+ BMI_IC_SIZE_MASK;
+ iowrite32be(tmp, &regs->fmbm_ticp);
+
+ /* Frame attributes */
+ tmp = BMI_CMD_TX_MR_DEF;
+ tmp |= BMI_CMD_ATTR_ORDER;
+ tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
+ iowrite32be(tmp, &regs->fmbm_tfca);
+
+ /* Dequeue NIA + enqueue NIA */
+ iowrite32be(NIA_ENG_QMI_DEQ, &regs->fmbm_tfdne);
+ iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_tfene);
+ if (cfg->fmbm_tfne_has_features)
+ iowrite32be(!cfg->dflt_fqid ?
+ BMI_EBD_EN | NIA_BMI_AC_FETCH_ALL_FRAME :
+ NIA_BMI_AC_FETCH_ALL_FRAME, &regs->fmbm_tfne);
+ if (!cfg->dflt_fqid && cfg->dont_release_buf) {
+ iowrite32be(DFLT_FQ_ID, &regs->fmbm_tcfqid);
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
+ &regs->fmbm_tfene);
+ if (cfg->fmbm_tfne_has_features)
+ iowrite32be(ioread32be(&regs->fmbm_tfne) & ~BMI_EBD_EN,
+ &regs->fmbm_tfne);
+ }
+
+ /* Confirmation/error queues */
+ if (cfg->dflt_fqid || !cfg->dont_release_buf)
+ iowrite32be(cfg->dflt_fqid & DFLT_FQ_ID, &regs->fmbm_tcfqid);
+ iowrite32be((cfg->err_fqid & DFLT_FQ_ID), &regs->fmbm_tefqid);
+
+ return 0;
+}
+
+static int init_qmi(struct fman_port *port)
+{
+ struct fman_port_qmi_regs __iomem *regs = port->qmi_regs;
+ struct fman_port_cfg *cfg = port->cfg;
+ u32 tmp;
+
+ /* Rx port configuration */
+ if (port->port_type == FMAN_PORT_TYPE_RX) {
+ /* Enqueue NIA */
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_RELEASE, &regs->fmqm_pnen);
+ return 0;
+ }
+
+ /* Continue with Tx port configuration */
+ if (port->port_type == FMAN_PORT_TYPE_TX) {
+ /* Enqueue NIA */
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
+ &regs->fmqm_pnen);
+ /* Dequeue NIA */
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX, &regs->fmqm_pndn);
+ }
+
+ /* Dequeue Configuration register */
+ tmp = 0;
+ if (cfg->deq_high_priority)
+ tmp |= QMI_DEQ_CFG_PRI;
+
+ switch (cfg->deq_type) {
+ case FMAN_PORT_DEQ_BY_PRI:
+ tmp |= QMI_DEQ_CFG_TYPE1;
+ break;
+ case FMAN_PORT_DEQ_ACTIVE_FQ:
+ tmp |= QMI_DEQ_CFG_TYPE2;
+ break;
+ case FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS:
+ tmp |= QMI_DEQ_CFG_TYPE3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (cfg->deq_prefetch_option) {
+ case FMAN_PORT_DEQ_NO_PREFETCH:
+ break;
+ case FMAN_PORT_DEQ_PART_PREFETCH:
+ tmp |= QMI_DEQ_CFG_PREFETCH_PARTIAL;
+ break;
+ case FMAN_PORT_DEQ_FULL_PREFETCH:
+ tmp |= QMI_DEQ_CFG_PREFETCH_FULL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tmp |= (cfg->deq_sp & QMI_DEQ_CFG_SP_MASK) << QMI_DEQ_CFG_SP_SHIFT;
+ tmp |= cfg->deq_byte_cnt;
+ iowrite32be(tmp, &regs->fmqm_pndc);
+
+ return 0;
+}
+
+static int init(struct fman_port *port)
+{
+ int err;
+
+ /* Init BMI registers */
+ switch (port->port_type) {
+ case FMAN_PORT_TYPE_RX:
+ err = init_bmi_rx(port);
+ break;
+ case FMAN_PORT_TYPE_TX:
+ err = init_bmi_tx(port);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (err)
+ return err;
+
+	/* Init QMI registers */
+	return init_qmi(port);
+}
+
+static int set_bpools(const struct fman_port *port,
+ const struct fman_port_bpools *bp)
+{
+ u32 __iomem *bp_reg, *bp_depl_reg;
+ u32 tmp;
+ u8 i, max_bp_num;
+ bool grp_depl_used = false, rx_port;
+
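+	/* Only Rx ports have external buffer pool registers, hence the
+	 * single supported case in the switch below
+	 */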
+ switch (port->port_type) {
+ case FMAN_PORT_TYPE_RX:
+ max_bp_num = port->ext_pools_num;
+ rx_port = true;
+ bp_reg = port->bmi_regs->rx.fmbm_ebmpi;
+ bp_depl_reg = &port->bmi_regs->rx.fmbm_mpd;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rx_port) {
+ /* Check buffers are provided in ascending order */
+ for (i = 0; (i < (bp->count - 1) &&
+ (i < FMAN_PORT_MAX_EXT_POOLS_NUM - 1)); i++) {
+ if (bp->bpool[i].size > bp->bpool[i + 1].size)
+ return -EINVAL;
+ }
+ }
+
+ /* Set up external buffers pools */
+ for (i = 0; i < bp->count; i++) {
+ tmp = BMI_EXT_BUF_POOL_VALID;
+ tmp |= ((u32)bp->bpool[i].bpid <<
+ BMI_EXT_BUF_POOL_ID_SHIFT) & BMI_EXT_BUF_POOL_ID_MASK;
+
+ if (rx_port) {
+ if (bp->counters_enable)
+ tmp |= BMI_EXT_BUF_POOL_EN_COUNTER;
+
+ if (bp->bpool[i].is_backup)
+ tmp |= BMI_EXT_BUF_POOL_BACKUP;
+
+ tmp |= (u32)bp->bpool[i].size;
+ }
+
+ iowrite32be(tmp, &bp_reg[i]);
+ }
+
+ /* Clear unused pools */
+ for (i = bp->count; i < max_bp_num; i++)
+ iowrite32be(0, &bp_reg[i]);
+
+ /* Pools depletion */
+ tmp = 0;
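+	/* In FMBM_MPD, pool i's group-depletion enable is bit (31 - i) and
+	 * its single-pool-depletion enable is bit (7 - i), matching the two
+	 * shifts in the loop below.
+	 */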
+ for (i = 0; i < FMAN_PORT_MAX_EXT_POOLS_NUM; i++) {
+ if (bp->bpool[i].grp_bp_depleted) {
+ grp_depl_used = true;
+ tmp |= 0x80000000 >> i;
+ }
+
+ if (bp->bpool[i].single_bp_depleted)
+ tmp |= 0x80 >> i;
+ }
+
+ if (grp_depl_used)
+ tmp |= ((u32)bp->grp_bp_depleted_num - 1) <<
+ BMI_POOL_DEP_NUM_OF_POOLS_SHIFT;
+
+ iowrite32be(tmp, bp_depl_reg);
+ return 0;
+}
+
+static bool is_init_done(struct fman_port_cfg *cfg)
+{
+	/* The config structure is freed (and cleared) at the end of
+	 * fman_port_init(), so a NULL cfg means initialization is done
+	 */
+	return !cfg;
+}
+
+static int verify_size_of_fifo(struct fman_port *port)
+{
+ u32 min_fifo_size_required = 0, opt_fifo_size_for_b2b = 0;
+
+ /* TX Ports */
+ if (port->port_type == FMAN_PORT_TYPE_TX) {
+ min_fifo_size_required = (u32)
+ (roundup(port->max_frame_length,
+ FMAN_BMI_FIFO_UNITS) + (3 * FMAN_BMI_FIFO_UNITS));
+
+ min_fifo_size_required +=
+ port->cfg->tx_fifo_deq_pipeline_depth *
+ FMAN_BMI_FIFO_UNITS;
+
+ opt_fifo_size_for_b2b = min_fifo_size_required;
+
+		/* Add some margin for back-to-back capability to improve
+		 * performance: it allows the hardware to pipeline the DMA of
+		 * a new frame while the previous frame is still being
+		 * transmitted.
+		 */
+ if (port->port_speed == 10000)
+ opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS;
+ else
+ opt_fifo_size_for_b2b += 2 * FMAN_BMI_FIFO_UNITS;
+ }
+
+ /* RX Ports */
+ else if (port->port_type == FMAN_PORT_TYPE_RX) {
+ if (port->rev_info.major >= 6)
+ min_fifo_size_required = (u32)
+ (roundup(port->max_frame_length,
+ FMAN_BMI_FIFO_UNITS) +
+ (5 * FMAN_BMI_FIFO_UNITS));
+ /* 4 according to spec + 1 for FOF>0 */
+ else
+ min_fifo_size_required = (u32)
+ (roundup(min(port->max_frame_length,
+ port->rx_pools_params.largest_buf_size),
+ FMAN_BMI_FIFO_UNITS) +
+ (7 * FMAN_BMI_FIFO_UNITS));
+
+ opt_fifo_size_for_b2b = min_fifo_size_required;
+
+		/* Add some margin for back-to-back capability to improve
+		 * performance: it allows the hardware to pipeline the DMA of
+		 * a new frame while the previous frame is still being
+		 * transmitted.
+		 */
+ if (port->port_speed == 10000)
+ opt_fifo_size_for_b2b += 8 * FMAN_BMI_FIFO_UNITS;
+ else
+ opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS;
+ }
+
+	WARN_ON(min_fifo_size_required == 0);
+ WARN_ON(opt_fifo_size_for_b2b < min_fifo_size_required);
+
+ /* Verify the size */
+ if (port->fifo_bufs.num < min_fifo_size_required)
+ dev_dbg(port->dev, "%s: FIFO size should be enlarged to %d bytes\n",
+ __func__, min_fifo_size_required);
+ else if (port->fifo_bufs.num < opt_fifo_size_for_b2b)
+ dev_dbg(port->dev, "%s: For b2b processing,FIFO may be enlarged to %d bytes\n",
+ __func__, opt_fifo_size_for_b2b);
+
+ return 0;
+}
+
+static int set_ext_buffer_pools(struct fman_port *port)
+{
+ struct fman_ext_pools *ext_buf_pools = &port->cfg->ext_buf_pools;
+ struct fman_buf_pool_depletion *buf_pool_depletion =
+ &port->cfg->buf_pool_depletion;
+ u8 ordered_array[FMAN_PORT_MAX_EXT_POOLS_NUM];
+ u16 sizes_array[BM_MAX_NUM_OF_POOLS];
+ int i = 0, j = 0, err;
+ struct fman_port_bpools bpools;
+
+	memset(ordered_array, 0, sizeof(ordered_array));
+	memset(sizes_array, 0, sizeof(sizes_array));
+ memcpy(&port->ext_buf_pools, ext_buf_pools,
+ sizeof(struct fman_ext_pools));
+
+ fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(ext_buf_pools,
+ ordered_array,
+ sizes_array);
+
+ memset(&bpools, 0, sizeof(struct fman_port_bpools));
+ bpools.count = ext_buf_pools->num_of_pools_used;
+ bpools.counters_enable = true;
+ for (i = 0; i < ext_buf_pools->num_of_pools_used; i++) {
+ bpools.bpool[i].bpid = ordered_array[i];
+ bpools.bpool[i].size = sizes_array[ordered_array[i]];
+ }
+
+ /* save pools parameters for later use */
+ port->rx_pools_params.num_of_pools = ext_buf_pools->num_of_pools_used;
+ port->rx_pools_params.largest_buf_size =
+ sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 1]];
+	/* Guard against an out-of-bounds read when only one pool is used */
+	if (ext_buf_pools->num_of_pools_used > 1)
+		port->rx_pools_params.second_largest_buf_size =
+		    sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 2]];
+
+	/* FMBM_RMPD reg. - pool depletion */
+	if (buf_pool_depletion->pools_grp_mode_enable) {
+		bpools.grp_bp_depleted_num = buf_pool_depletion->num_of_pools;
+		for (i = 0; i < port->bm_max_num_of_pools; i++) {
+			if (!buf_pool_depletion->pools_to_consider[i])
+				continue;
+			for (j = 0;
+			     j < ext_buf_pools->num_of_pools_used; j++) {
+				if (i == ordered_array[j]) {
+					bpools.bpool[j].grp_bp_depleted =
+						true;
+					break;
+				}
+			}
+		}
+	}
+
+	if (buf_pool_depletion->single_pool_mode_enable) {
+		for (i = 0; i < port->bm_max_num_of_pools; i++) {
+			if (!buf_pool_depletion->
+			    pools_to_consider_for_single_mode[i])
+				continue;
+			for (j = 0;
+			     j < ext_buf_pools->num_of_pools_used; j++) {
+				if (i == ordered_array[j]) {
+					bpools.bpool[j].single_bp_depleted =
+						true;
+					break;
+				}
+			}
+		}
+	}
+
+ err = set_bpools(port, &bpools);
+ if (err != 0) {
+ dev_err(port->dev, "%s: set_bpools() failed\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int init_low_level_driver(struct fman_port *port)
+{
+ struct fman_port_cfg *cfg = port->cfg;
+ u32 tmp_val;
+
+ switch (port->port_type) {
+ case FMAN_PORT_TYPE_RX:
+ cfg->err_mask = (RX_ERRS_TO_ENQ & ~cfg->discard_mask);
+ break;
+ default:
+ break;
+ }
+
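+	/* Round the internal buffer offset up to a whole number of
+	 * OFFSET_UNITS
+	 */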
+ tmp_val = (u32)((port->internal_buf_offset % OFFSET_UNITS) ?
+ (port->internal_buf_offset / OFFSET_UNITS + 1) :
+ (port->internal_buf_offset / OFFSET_UNITS));
+ port->internal_buf_offset = (u8)(tmp_val * OFFSET_UNITS);
+ port->cfg->int_buf_start_margin = port->internal_buf_offset;
+
+ if (init(port) != 0) {
+ dev_err(port->dev, "%s: fman port initialization failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+	/* The code below is a trick so the FM will not release the buffer
+ * to BM nor will try to enqueue the frame to QM
+ */
+ if (port->port_type == FMAN_PORT_TYPE_TX) {
+ if (!cfg->dflt_fqid && cfg->dont_release_buf) {
+ /* override fmbm_tcfqid 0 with a false non-0 value.
+ * This will force FM to act according to tfene.
+ * Otherwise, if fmbm_tcfqid is 0 the FM will release
+ * buffers to BM regardless of fmbm_tfene
+ */
+ iowrite32be(0xFFFFFF, &port->bmi_regs->tx.fmbm_tcfqid);
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
+ &port->bmi_regs->tx.fmbm_tfene);
+ }
+ }
+
+ return 0;
+}
+
+static int fill_soc_specific_params(struct fman_port *port)
+{
+ u32 bmi_max_fifo_size;
+
+ bmi_max_fifo_size = fman_get_bmi_max_fifo_size(port->fm);
+ port->max_port_fifo_size = MAX_PORT_FIFO_SIZE(bmi_max_fifo_size);
+ port->bm_max_num_of_pools = 64;
+
+ /* P4080 - Major 2
+ * P2041/P3041/P5020/P5040 - Major 3
+ * Tx/Bx - Major 6
+ */
+ switch (port->rev_info.major) {
+ case 2:
+ case 3:
+ port->max_num_of_ext_pools = 4;
+ port->max_num_of_sub_portals = 12;
+ break;
+
+ case 6:
+ port->max_num_of_ext_pools = 8;
+ port->max_num_of_sub_portals = 16;
+ break;
+
+ default:
+ dev_err(port->dev, "%s: Unsupported FMan version\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_dflt_fifo_deq_pipeline_depth(u8 major, enum fman_port_type type,
+ u16 speed)
+{
+ switch (type) {
+ case FMAN_PORT_TYPE_RX:
+ case FMAN_PORT_TYPE_TX:
+ switch (speed) {
+ case 10000:
+ return 4;
+ case 1000:
+ if (major >= 6)
+ return 2;
+ else
+ return 1;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static int get_dflt_num_of_tasks(u8 major, enum fman_port_type type,
+ u16 speed)
+{
+ switch (type) {
+ case FMAN_PORT_TYPE_RX:
+ case FMAN_PORT_TYPE_TX:
+ switch (speed) {
+ case 10000:
+ return 16;
+ case 1000:
+ if (major >= 6)
+ return 4;
+ else
+ return 3;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static int get_dflt_extra_num_of_tasks(u8 major, enum fman_port_type type,
+ u16 speed)
+{
+ switch (type) {
+ case FMAN_PORT_TYPE_RX:
+ /* FMan V3 */
+ if (major >= 6)
+ return 0;
+
+ /* FMan V2 */
+ if (speed == 10000)
+ return 8;
+ else
+ return 2;
+ case FMAN_PORT_TYPE_TX:
+ default:
+ return 0;
+ }
+}
+
+static int get_dflt_num_of_open_dmas(u8 major, enum fman_port_type type,
+ u16 speed)
+{
+ int val;
+
+ if (major >= 6) {
+ switch (type) {
+ case FMAN_PORT_TYPE_TX:
+ if (speed == 10000)
+ val = 12;
+ else
+ val = 3;
+ break;
+ case FMAN_PORT_TYPE_RX:
+ if (speed == 10000)
+ val = 8;
+ else
+ val = 2;
+ break;
+ default:
+ return 0;
+ }
+ } else {
+ switch (type) {
+ case FMAN_PORT_TYPE_TX:
+ case FMAN_PORT_TYPE_RX:
+ if (speed == 10000)
+ val = 8;
+ else
+ val = 1;
+ break;
+ default:
+ val = 0;
+ }
+ }
+
+ return val;
+}
+
+static int get_dflt_extra_num_of_open_dmas(u8 major, enum fman_port_type type,
+ u16 speed)
+{
+ /* FMan V3 */
+ if (major >= 6)
+ return 0;
+
+ /* FMan V2 */
+ switch (type) {
+ case FMAN_PORT_TYPE_RX:
+ case FMAN_PORT_TYPE_TX:
+ if (speed == 10000)
+ return 8;
+ else
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int get_dflt_num_of_fifo_bufs(u8 major, enum fman_port_type type,
+ u16 speed)
+{
+ int val;
+
+ if (major >= 6) {
+ switch (type) {
+ case FMAN_PORT_TYPE_TX:
+ if (speed == 10000)
+ val = 64;
+ else
+ val = 50;
+ break;
+ case FMAN_PORT_TYPE_RX:
+ if (speed == 10000)
+ val = 96;
+ else
+ val = 50;
+ break;
+ default:
+ val = 0;
+ }
+ } else {
+ switch (type) {
+ case FMAN_PORT_TYPE_TX:
+ if (speed == 10000)
+ val = 48;
+ else
+ val = 44;
+ break;
+ case FMAN_PORT_TYPE_RX:
+ if (speed == 10000)
+ val = 48;
+ else
+ val = 45;
+ break;
+ default:
+ val = 0;
+ }
+ }
+
+ return val;
+}
+
+static void set_dflt_cfg(struct fman_port *port,
+ struct fman_port_params *port_params)
+{
+ struct fman_port_cfg *cfg = port->cfg;
+
+ cfg->dma_swap_data = FMAN_PORT_DMA_NO_SWAP;
+ cfg->color = FMAN_PORT_COLOR_GREEN;
+ cfg->rx_cut_end_bytes = DFLT_PORT_CUT_BYTES_FROM_END;
+ cfg->tx_fifo_low_comf_level = (5 * 1024);
+ cfg->deq_type = FMAN_PORT_DEQ_BY_PRI;
+ cfg->deq_prefetch_option = FMAN_PORT_DEQ_FULL_PREFETCH;
+ cfg->tx_fifo_deq_pipeline_depth =
+ BMI_DEQUEUE_PIPELINE_DEPTH(port->port_type, port->port_speed);
+ cfg->deq_byte_cnt = QMI_BYTE_COUNT_LEVEL_CONTROL(port->port_type);
+
+ cfg->rx_pri_elevation =
+ DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(port->max_port_fifo_size);
+	cfg->rx_fifo_thr =
+ DFLT_PORT_RX_FIFO_THRESHOLD(port->rev_info.major,
+ port->max_port_fifo_size);
+
+ if ((port->rev_info.major == 6) &&
+ ((port->rev_info.minor == 0) || (port->rev_info.minor == 3)))
+ cfg->errata_A006320 = true;
+
+ /* Excessive Threshold register - exists for pre-FMv3 chips only */
+ if (port->rev_info.major < 6)
+ cfg->excessive_threshold_register = true;
+ else
+ cfg->fmbm_tfne_has_features = true;
+
+ cfg->buffer_prefix_content.data_align =
+ DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
+}
+
+static void set_rx_dflt_cfg(struct fman_port *port,
+ struct fman_port_params *port_params)
+{
+ port->cfg->discard_mask = DFLT_PORT_ERRORS_TO_DISCARD;
+
+ memcpy(&port->cfg->ext_buf_pools,
+ &port_params->specific_params.rx_params.ext_buf_pools,
+ sizeof(struct fman_ext_pools));
+ port->cfg->err_fqid =
+ port_params->specific_params.rx_params.err_fqid;
+ port->cfg->dflt_fqid =
+ port_params->specific_params.rx_params.dflt_fqid;
+}
+
+static void set_tx_dflt_cfg(struct fman_port *port,
+ struct fman_port_params *port_params,
+ struct fman_port_dts_params *dts_params)
+{
+ port->cfg->tx_fifo_deq_pipeline_depth =
+ get_dflt_fifo_deq_pipeline_depth(port->rev_info.major,
+ port->port_type,
+ port->port_speed);
+ port->cfg->err_fqid =
+ port_params->specific_params.non_rx_params.err_fqid;
+ port->cfg->deq_sp =
+ (u8)(dts_params->qman_channel_id & QMI_DEQ_CFG_SUBPORTAL_MASK);
+ port->cfg->dflt_fqid =
+ port_params->specific_params.non_rx_params.dflt_fqid;
+ port->cfg->deq_high_priority = true;
+}
+
+/**
+ * fman_port_config
+ * @port: Pointer to the port structure
+ * @params: Pointer to data structure of parameters
+ *
+ * Creates a descriptor for the FM PORT module.
+ * This descriptor must be passed as the first parameter to all other FM PORT
+ * function calls.
+ * No actual initialization or configuration of FM hardware is done by this
+ * routine.
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_config(struct fman_port *port, struct fman_port_params *params)
+{
+ void __iomem *base_addr = port->dts_params.base_addr;
+ int err;
+
+ /* Allocate the FM driver's parameters structure */
+ port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL);
+ if (!port->cfg)
+ goto err_params;
+
+ /* Initialize FM port parameters which will be kept by the driver */
+ port->port_type = port->dts_params.type;
+ port->port_speed = port->dts_params.speed;
+ port->port_id = port->dts_params.id;
+ port->fm = port->dts_params.fman;
+ port->ext_pools_num = (u8)8;
+
+ /* get FM revision */
+ fman_get_revision(port->fm, &port->rev_info);
+
+ err = fill_soc_specific_params(port);
+ if (err)
+ goto err_port_cfg;
+
+ switch (port->port_type) {
+ case FMAN_PORT_TYPE_RX:
+		set_rx_dflt_cfg(port, params);
+		/* fall through - Rx ports also take the Tx defaults */
+	case FMAN_PORT_TYPE_TX:
+		set_tx_dflt_cfg(port, params, &port->dts_params);
+		/* fall through - all ports take the common defaults */
+ default:
+ set_dflt_cfg(port, params);
+ }
+
+ /* Continue with other parameters */
+ /* set memory map pointers */
+ port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET;
+ port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET;
+
+ port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH;
+ /* resource distribution. */
+
+ port->fifo_bufs.num =
+ get_dflt_num_of_fifo_bufs(port->rev_info.major, port->port_type,
+ port->port_speed) * FMAN_BMI_FIFO_UNITS;
+ port->fifo_bufs.extra =
+ DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS * FMAN_BMI_FIFO_UNITS;
+
+ port->open_dmas.num =
+ get_dflt_num_of_open_dmas(port->rev_info.major,
+ port->port_type, port->port_speed);
+ port->open_dmas.extra =
+ get_dflt_extra_num_of_open_dmas(port->rev_info.major,
+ port->port_type, port->port_speed);
+ port->tasks.num =
+ get_dflt_num_of_tasks(port->rev_info.major,
+ port->port_type, port->port_speed);
+ port->tasks.extra =
+ get_dflt_extra_num_of_tasks(port->rev_info.major,
+ port->port_type, port->port_speed);
+
+ /* FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 errata
+ * workaround
+ */
+ if ((port->rev_info.major == 6) && (port->rev_info.minor == 0) &&
+ (((port->port_type == FMAN_PORT_TYPE_TX) &&
+ (port->port_speed == 1000)))) {
+ port->open_dmas.num = 16;
+ port->open_dmas.extra = 0;
+ }
+
+ if (port->rev_info.major >= 6 &&
+ port->port_type == FMAN_PORT_TYPE_TX &&
+ port->port_speed == 1000) {
+ /* FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 Errata
+ * workaround
+ */
+		iowrite32be(0x00001013, &port->bmi_regs->tx.fmbm_tfp);
+ }
+
+ return 0;
+
+err_port_cfg:
+ kfree(port->cfg);
+err_params:
+ kfree(port);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(fman_port_config);
+
+/**
+ * fman_port_init
+ * @port: A pointer to a FM Port module.
+ *
+ * Initializes the FM PORT module by defining the software structure and
+ * configuring the hardware registers.
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_init(struct fman_port *port)
+{
+ struct fman_port_cfg *cfg;
+ int err;
+ struct fman_port_init_params params;
+
+ if (is_init_done(port->cfg))
+ return -EINVAL;
+
+ err = fman_sp_build_buffer_struct(&port->cfg->int_context,
+ &port->cfg->buffer_prefix_content,
+ &port->cfg->buf_margins,
+ &port->buffer_offsets,
+ &port->internal_buf_offset);
+ if (err)
+ return err;
+
+ cfg = port->cfg;
+
+ if (port->port_type == FMAN_PORT_TYPE_RX) {
+ /* Call the external Buffer routine which also checks fifo
+ * size and updates it if necessary
+ */
+ /* define external buffer pools and pool depletion */
+ err = set_ext_buffer_pools(port);
+ if (err)
+ return err;
+ /* check if the largest external buffer pool is large enough */
+ if (cfg->buf_margins.start_margins + MIN_EXT_BUF_SIZE +
+ cfg->buf_margins.end_margins >
+ port->rx_pools_params.largest_buf_size) {
+ dev_err(port->dev, "%s: buf_margins.start_margins (%d) + minimum buf size (64) + buf_margins.end_margins (%d) is larger than maximum external buffer size (%d)\n",
+ __func__, cfg->buf_margins.start_margins,
+ cfg->buf_margins.end_margins,
+ port->rx_pools_params.largest_buf_size);
+ return -EINVAL;
+ }
+ }
+
+ /* Call FM module routine for communicating parameters */
+ memset(&params, 0, sizeof(params));
+ params.port_id = port->port_id;
+ params.port_type = port->port_type;
+ params.port_speed = port->port_speed;
+ params.num_of_tasks = (u8)port->tasks.num;
+ params.num_of_extra_tasks = (u8)port->tasks.extra;
+ params.num_of_open_dmas = (u8)port->open_dmas.num;
+ params.num_of_extra_open_dmas = (u8)port->open_dmas.extra;
+
+ if (port->fifo_bufs.num) {
+ err = verify_size_of_fifo(port);
+ if (err)
+ return err;
+ }
+ params.size_of_fifo = port->fifo_bufs.num;
+ params.extra_size_of_fifo = port->fifo_bufs.extra;
+ params.deq_pipeline_depth = port->cfg->tx_fifo_deq_pipeline_depth;
+ params.max_frame_length = port->max_frame_length;
+
+ err = fman_set_port_params(port->fm, &params);
+ if (err)
+ return err;
+
+ err = init_low_level_driver(port);
+ if (err)
+ return err;
+
+ kfree(port->cfg);
+ port->cfg = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_port_init);
+
+/**
+ * fman_port_cfg_buf_prefix_content
+ * @port: A pointer to a FM Port module.
+ * @buffer_prefix_content: A structure of parameters describing
+ *			   the structure of the buffer.
+ *			   Out parameter: start margin - offset of data from
+ *			   the start of the external buffer.
+ *
+ * Defines the structure, size and content of the application buffer.
+ * In the buffer prefix the FM first saves 'priv_data_size' bytes of private
+ * data, then, depending on 'pass_prs_result' and 'pass_time_stamp', the
+ * parse result and the timestamp, and finally the packet itself (in this
+ * order). On Tx ports, if 'pass_prs_result' is set, the application should
+ * place the parse result at its offset within the prefix.
+ * Calling this routine changes the buffer margins definitions in the
+ * internal driver data base from its default configuration:
+ * Data size: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PRIV_DATA_SIZE]
+ * Pass Parser result: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_PRS_RESULT]
+ * Pass timestamp: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_TIME_STAMP]
+ * May be used for all ports.
+ *
+ * Allowed only following fman_port_config() and before fman_port_init().
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_cfg_buf_prefix_content(struct fman_port *port,
+ struct fman_buffer_prefix_content *
+ buffer_prefix_content)
+{
+ if (is_init_done(port->cfg))
+ return -EINVAL;
+
+ memcpy(&port->cfg->buffer_prefix_content,
+ buffer_prefix_content,
+ sizeof(struct fman_buffer_prefix_content));
+ /* if data_align was not initialized by user,
+ * we return to driver's default
+ */
+ if (!port->cfg->buffer_prefix_content.data_align)
+ port->cfg->buffer_prefix_content.data_align =
+ DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_port_cfg_buf_prefix_content);
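+
+/* Illustrative sketch (not taken from a real caller): request the parse
+ * result and the timestamp in the buffer prefix, with 16 bytes of private
+ * data, between fman_port_config() and fman_port_init():
+ *
+ *	struct fman_buffer_prefix_content pfc = {
+ *		.priv_data_size = 16,
+ *		.pass_prs_result = true,
+ *		.pass_time_stamp = true,
+ *		.data_align = 64,
+ *	};
+ *
+ *	err = fman_port_cfg_buf_prefix_content(port, &pfc);
+ */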
+
+/**
+ * fman_port_disable
+ * @port: A pointer to a FM Port module.
+ *
+ * Gracefully disable an FM port: the port will not start new tasks, and the
+ * tasks already associated with the port are allowed to terminate.
+ *
+ * This is a blocking routine; it returns only after the port is gracefully
+ * stopped, i.e. the port will not accept new frames, but it will finish all
+ * frames and tasks that had already begun.
+ * Allowed only following fman_port_init().
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_disable(struct fman_port *port)
+{
+ u32 __iomem *bmi_cfg_reg, *bmi_status_reg, tmp;
+ bool rx_port, failure = false;
+ int count;
+
+ if (!is_init_done(port->cfg))
+ return -EINVAL;
+
+ switch (port->port_type) {
+ case FMAN_PORT_TYPE_RX:
+ bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
+ bmi_status_reg = &port->bmi_regs->rx.fmbm_rst;
+ rx_port = true;
+ break;
+ case FMAN_PORT_TYPE_TX:
+ bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
+ bmi_status_reg = &port->bmi_regs->tx.fmbm_tst;
+ rx_port = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Disable QMI */
+ if (!rx_port) {
+ tmp = ioread32be(&port->qmi_regs->fmqm_pnc) & ~QMI_PORT_CFG_EN;
+ iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
+
+ /* Wait for QMI to finish FD handling */
+ count = 100;
+ do {
+ udelay(10);
+ tmp = ioread32be(&port->qmi_regs->fmqm_pns);
+ } while ((tmp & QMI_PORT_STATUS_DEQ_FD_BSY) && --count);
+
+ if (count == 0) {
+ /* Timeout */
+ failure = true;
+ }
+ }
+
+ /* Disable BMI */
+ tmp = ioread32be(bmi_cfg_reg) & ~BMI_PORT_CFG_EN;
+ iowrite32be(tmp, bmi_cfg_reg);
+
+ /* Wait for graceful stop end */
+ count = 500;
+ do {
+ udelay(10);
+ tmp = ioread32be(bmi_status_reg);
+ } while ((tmp & BMI_PORT_STATUS_BSY) && --count);
+
+ if (count == 0) {
+ /* Timeout */
+ failure = true;
+ }
+
+ if (failure)
+ dev_dbg(port->dev, "%s: FMan Port[%d]: BMI or QMI is Busy. Port forced down\n",
+ __func__, port->port_id);
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_port_disable);
+
+/**
+ * fman_port_enable
+ * @port: A pointer to a FM Port module.
+ *
+ * A runtime routine to re-enable a port after it has been disabled.
+ *
+ * Allowed only following fman_port_init().
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_enable(struct fman_port *port)
+{
+ u32 __iomem *bmi_cfg_reg, tmp;
+ bool rx_port;
+
+ if (!is_init_done(port->cfg))
+ return -EINVAL;
+
+ switch (port->port_type) {
+ case FMAN_PORT_TYPE_RX:
+ bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
+ rx_port = true;
+ break;
+ case FMAN_PORT_TYPE_TX:
+ bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
+ rx_port = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Enable QMI */
+ if (!rx_port) {
+ tmp = ioread32be(&port->qmi_regs->fmqm_pnc) | QMI_PORT_CFG_EN;
+ iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
+ }
+
+ /* Enable BMI */
+ tmp = ioread32be(bmi_cfg_reg) | BMI_PORT_CFG_EN;
+ iowrite32be(tmp, bmi_cfg_reg);
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_port_enable);
+
+/**
+ * fman_port_bind
+ * @dev: FMan Port OF device pointer
+ *
+ * Bind to a specific FMan Port.
+ *
+ * Allowed only after the port was created.
+ *
+ * Return: A pointer to the FMan port device.
+ */
+struct fman_port *fman_port_bind(struct device *dev)
+{
+ return (struct fman_port *)(dev_get_drvdata(get_device(dev)));
+}
+EXPORT_SYMBOL(fman_port_bind);
+
+/**
+ * fman_port_get_qman_channel_id
+ * @port: Pointer to the FMan port device
+ *
+ * Get the QMan channel ID for the specific port
+ *
+ * Return: QMan channel ID
+ */
+u32 fman_port_get_qman_channel_id(struct fman_port *port)
+{
+ return port->dts_params.qman_channel_id;
+}
+EXPORT_SYMBOL(fman_port_get_qman_channel_id);
+
+static int fman_port_probe(struct platform_device *of_dev)
+{
+ struct fman_port *port;
+	struct fman *fman;
+	struct platform_device *fm_pdev;
+ struct device_node *fm_node, *port_node;
+ struct resource res;
+ struct resource *dev_res;
+ const u32 *u32_prop;
+ int err = 0, lenp;
+ enum fman_port_type port_type;
+ u16 port_speed;
+ u8 port_id;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->dev = &of_dev->dev;
+
+ port_node = of_node_get(of_dev->dev.of_node);
+
+ /* Get the FM node */
+ fm_node = of_get_parent(port_node);
+ if (!fm_node) {
+ dev_err(port->dev, "%s: of_get_parent() failed\n", __func__);
+ err = -ENODEV;
+ goto return_err;
+ }
+
+	fm_pdev = of_find_device_by_node(fm_node);
+	of_node_put(fm_node);
+	if (!fm_pdev) {
+		err = -ENODEV;
+		goto return_err;
+	}
+
+	fman = dev_get_drvdata(&fm_pdev->dev);
+	if (!fman) {
+		err = -EINVAL;
+		goto return_err;
+	}
+
+ u32_prop = (const u32 *)of_get_property(port_node, "cell-index", &lenp);
+ if (!u32_prop) {
+ dev_err(port->dev, "%s: of_get_property(%s, cell-index) failed\n",
+ __func__, port_node->full_name);
+ err = -EINVAL;
+ goto return_err;
+ }
+ if (WARN_ON(lenp != sizeof(u32))) {
+ err = -EINVAL;
+ goto return_err;
+ }
+ port_id = (u8)fdt32_to_cpu(u32_prop[0]);
+
+ port->dts_params.id = port_id;
+
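+	/* Determine the port type and speed: FManV3 ports carry an explicit
+	 * "fsl,fman-10g-port" property, while FManV2 ports encode 10G
+	 * operation in the port ID range ({RX,TX}_10G_PORT_BASE).
+	 */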
+ if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
+ port_type = FMAN_PORT_TYPE_TX;
+ port_speed = 1000;
+ u32_prop = (const u32 *)of_get_property(port_node,
+ "fsl,fman-10g-port",
+ &lenp);
+ if (u32_prop)
+ port_speed = 10000;
+
+ } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
+ if (port_id >= TX_10G_PORT_BASE)
+ port_speed = 10000;
+ else
+ port_speed = 1000;
+ port_type = FMAN_PORT_TYPE_TX;
+
+ } else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
+ port_type = FMAN_PORT_TYPE_RX;
+ port_speed = 1000;
+ u32_prop = (const u32 *)of_get_property(port_node,
+ "fsl,fman-10g-port", &lenp);
+ if (u32_prop)
+ port_speed = 10000;
+
+ } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
+ if (port_id >= RX_10G_PORT_BASE)
+ port_speed = 10000;
+ else
+ port_speed = 1000;
+ port_type = FMAN_PORT_TYPE_RX;
+
+ } else {
+ dev_err(port->dev, "%s: Illegal port type\n", __func__);
+ err = -EINVAL;
+ goto return_err;
+ }
+
+ port->dts_params.type = port_type;
+ port->dts_params.speed = port_speed;
+
+ if (port_type == FMAN_PORT_TYPE_TX) {
+ u32 qman_channel_id;
+
+ qman_channel_id = fman_get_qman_channel_id(fman, port_id);
+ if (qman_channel_id == 0) {
+ dev_err(port->dev, "%s: incorrect qman-channel-id\n",
+ __func__);
+ err = -EINVAL;
+ goto return_err;
+ }
+ port->dts_params.qman_channel_id = qman_channel_id;
+ }
+
+ err = of_address_to_resource(port_node, 0, &res);
+ if (err < 0) {
+ dev_err(port->dev, "%s: of_address_to_resource() failed\n",
+ __func__);
+ err = -ENOMEM;
+ goto return_err;
+ }
+
+ port->dts_params.fman = fman;
+
+ of_node_put(port_node);
+
+ dev_res = __devm_request_region(port->dev, &res, res.start,
+ resource_size(&res), "fman-port");
+ if (!dev_res) {
+ dev_err(port->dev, "%s: __devm_request_region() failed\n",
+ __func__);
+ err = -EINVAL;
+ goto free_port;
+ }
+
+ port->dts_params.base_addr = devm_ioremap(port->dev, res.start,
+ resource_size(&res));
+	if (!port->dts_params.base_addr)
+ dev_err(port->dev, "%s: devm_ioremap() failed\n", __func__);
+
+ dev_set_drvdata(&of_dev->dev, port);
+
+ return 0;
+
+return_err:
+ of_node_put(port_node);
+free_port:
+ kfree(port);
+ return err;
+}
+
+static const struct of_device_id fman_port_match[] = {
+ {.compatible = "fsl,fman-v3-port-rx"},
+ {.compatible = "fsl,fman-v2-port-rx"},
+ {.compatible = "fsl,fman-v3-port-tx"},
+ {.compatible = "fsl,fman-v2-port-tx"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, fman_port_match);
+
+static struct platform_driver fman_port_driver = {
+ .driver = {
+ .name = "fsl-fman-port",
+ .of_match_table = fman_port_match,
+ },
+ .probe = fman_port_probe,
+};
+
+builtin_platform_driver(fman_port_driver);
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h
new file mode 100644
index 0000000..8ba9017
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FMAN_PORT_H
+#define __FMAN_PORT_H
+
+#include "fman.h"
+
+/* FM Port API
+ * The FM uses a general module called "port" to represent a Tx port (MAC)
+ * or an Rx port (MAC).
+ * The number of ports in an FM varies between SoCs.
+ * The SW driver manages these ports as sub-modules of the FM, i.e. after an
+ * FM is initialized, its ports may be initialized and operated upon.
+ * The port is initialized aware of its type, but other functions on a port
+ * may be indifferent to its type. When necessary, the driver verifies
+ * coherence and returns an error if applicable.
+ * On initialization, the user specifies the port type and its index
+ * (relative to the port's type) - always starting at 0.
+ */
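+
+/* A minimal usage sketch for an Rx port (illustrative only: error handling
+ * and the surrounding DPAA Ethernet glue are omitted, and port_dev,
+ * err_fqid and dflt_fqid are placeholders supplied by the caller):
+ *
+ *	struct fman_port *port = fman_port_bind(port_dev);
+ *	struct fman_port_params params;
+ *
+ *	memset(&params, 0, sizeof(params));
+ *	params.specific_params.rx_params.err_fqid = err_fqid;
+ *	params.specific_params.rx_params.dflt_fqid = dflt_fqid;
+ *
+ *	if (!fman_port_config(port, &params) && !fman_port_init(port))
+ *		fman_port_enable(port);
+ */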
+
+/* FM Frame error */
+/* Frame Descriptor errors */
+/* Not for Rx-Port! Unsupported Format */
+#define FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT FM_FD_ERR_UNSUPPORTED_FORMAT
+/* Not for Rx-Port! Length Error */
+#define FM_PORT_FRM_ERR_LENGTH FM_FD_ERR_LENGTH
+/* DMA Data error */
+#define FM_PORT_FRM_ERR_DMA FM_FD_ERR_DMA
+/* non Frame-Manager error; probably comes from SEC that was chained to FM */
+#define FM_PORT_FRM_ERR_NON_FM FM_FD_RX_STATUS_ERR_NON_FM
+/* IPR error */
+#define FM_PORT_FRM_ERR_IPRE (FM_FD_ERR_IPR & ~FM_FD_IPR)
+/* IPR non-consistent-sp */
+#define FM_PORT_FRM_ERR_IPR_NCSP (FM_FD_ERR_IPR_NCSP & \
+ ~FM_FD_IPR)
+
+/* Rx FIFO overflow, FCS error, code error, running disparity
+ * error (SGMII and TBI modes), FIFO parity error.
+ * PHY Sequence error, PHY error control character detected.
+ */
+#define FM_PORT_FRM_ERR_PHYSICAL FM_FD_ERR_PHYSICAL
+/* Frame too long OR Frame size exceeds max_length_frame */
+#define FM_PORT_FRM_ERR_SIZE FM_FD_ERR_SIZE
+/* indicates a classifier "drop" operation */
+#define FM_PORT_FRM_ERR_CLS_DISCARD FM_FD_ERR_CLS_DISCARD
+/* Extract Out of Frame */
+#define FM_PORT_FRM_ERR_EXTRACTION FM_FD_ERR_EXTRACTION
+/* No Scheme Selected */
+#define FM_PORT_FRM_ERR_NO_SCHEME FM_FD_ERR_NO_SCHEME
+/* Keysize Overflow */
+#define FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW FM_FD_ERR_KEYSIZE_OVERFLOW
+/* Frame color is red */
+#define FM_PORT_FRM_ERR_COLOR_RED FM_FD_ERR_COLOR_RED
+/* Frame color is yellow */
+#define FM_PORT_FRM_ERR_COLOR_YELLOW FM_FD_ERR_COLOR_YELLOW
+/* Parser Time out Exceed */
+#define FM_PORT_FRM_ERR_PRS_TIMEOUT FM_FD_ERR_PRS_TIMEOUT
+/* Invalid Soft Parser instruction */
+#define FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT FM_FD_ERR_PRS_ILL_INSTRUCT
+/* Header error was identified during parsing */
+#define FM_PORT_FRM_ERR_PRS_HDR_ERR FM_FD_ERR_PRS_HDR_ERR
+/* Frame parsed beyond the first 256 bytes */
+#define FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED FM_FD_ERR_BLOCK_LIMIT_EXCEEDED
+/* FPM Frame Processing Timeout Exceeded */
+#define FM_PORT_FRM_ERR_PROCESS_TIMEOUT 0x00000001
+
+struct fman_port;
+
+/* A structure for additional Rx port parameters */
+struct fman_port_rx_params {
+ u32 err_fqid; /* Error Queue Id. */
+ u32 dflt_fqid; /* Default Queue Id. */
+ /* Which external buffer pools are used
+ * (up to FMAN_PORT_MAX_EXT_POOLS_NUM), and their sizes.
+ */
+ struct fman_ext_pools ext_buf_pools;
+};
+
+/* A structure for additional non-Rx port parameters */
+struct fman_port_non_rx_params {
+ /* Error Queue Id. */
+ u32 err_fqid;
+ /* For Tx - Default Confirmation queue, 0 means no Tx confirmation
+ * for processed frames. For OP port - default Rx queue.
+ */
+ u32 dflt_fqid;
+};
+
+/* A union for additional parameters depending on port type */
+union fman_port_specific_params {
+ /* Rx port parameters structure */
+ struct fman_port_rx_params rx_params;
+ /* Non-Rx port parameters structure */
+ struct fman_port_non_rx_params non_rx_params;
+};
+
+/* A structure representing FM initialization parameters */
+struct fman_port_params {
+ /* Virtual Address of memory mapped FM Port registers. */
+ void *fm;
+	/* Additional parameters depending on port type. */
+	union fman_port_specific_params specific_params;
+};
+
+int fman_port_config(struct fman_port *port, struct fman_port_params *params);
+
+int fman_port_init(struct fman_port *port);
+
+int fman_port_cfg_buf_prefix_content(struct fman_port *port,
+ struct fman_buffer_prefix_content
+ *buffer_prefix_content);
+
+int fman_port_disable(struct fman_port *port);
+
+int fman_port_enable(struct fman_port *port);
+
+u32 fman_port_get_qman_channel_id(struct fman_port *port);
+
+struct fman_port *fman_port_bind(struct device *dev);
+
+#endif /* __FMAN_PORT_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.c b/drivers/net/ethernet/freescale/fman/fman_sp.c
new file mode 100644
index 0000000..f9e7aa3
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "fman_sp.h"
+#include "fman.h"
+
+void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
+ *fm_ext_pools,
+ u8 *ordered_array,
+ u16 *sizes_array)
+{
+ u16 buf_size = 0;
+ int i = 0, j = 0, k = 0;
+
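+	/* ordered_array is built with an insertion sort over at most
+	 * FMAN_PORT_MAX_EXT_POOLS_NUM entries, keyed by ascending buffer
+	 * size.
+	 */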
+ /* First we copy the external buffers pools information
+ * to an ordered local array
+ */
+ for (i = 0; i < fm_ext_pools->num_of_pools_used; i++) {
+ /* get pool size */
+ buf_size = fm_ext_pools->ext_buf_pool[i].size;
+
+ /* keep sizes in an array according to poolId
+ * for direct access
+ */
+ sizes_array[fm_ext_pools->ext_buf_pool[i].id] = buf_size;
+
+ /* save poolId in an ordered array according to size */
+ for (j = 0; j <= i; j++) {
+ /* this is the next free place in the array */
+ if (j == i)
+ ordered_array[i] =
+ fm_ext_pools->ext_buf_pool[i].id;
+ else {
+ /* find the right place for this poolId */
+ if (buf_size < sizes_array[ordered_array[j]]) {
+ /* move the pool_ids one place ahead
+ * to make room for this poolId
+ */
+ for (k = i; k > j; k--)
+ ordered_array[k] =
+ ordered_array[k - 1];
+
+ /* now k==j, this is the place for
+ * the new size
+ */
+ ordered_array[k] =
+ fm_ext_pools->ext_buf_pool[i].id;
+ break;
+ }
+ }
+ }
+ }
+}
+
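+/* Worked example (assuming OFFSET_UNITS == 16 and
+ * sizeof(struct fman_prs_result) == 32, as the comments and the 32-byte
+ * copy size below suggest): with priv_data_size = 16, pass_prs_result and
+ * pass_time_stamp set, and data_align = 64, this function yields
+ * ext_buf_offset = 16, size = 48, prs_result_offset = 16,
+ * time_stamp_offset = 48, start_margins = 64 and data_offset = 64.
+ */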
+int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
+ int_context_data_copy,
+ struct fman_buffer_prefix_content *
+ buffer_prefix_content,
+ struct fman_sp_buf_margins *buf_margins,
+ struct fman_sp_buffer_offsets *buffer_offsets,
+ u8 *internal_buf_offset)
+{
+ u32 tmp;
+
+ /* Align start of internal context data to 16 byte */
+ int_context_data_copy->ext_buf_offset = (u16)
+ ((buffer_prefix_content->priv_data_size & (OFFSET_UNITS - 1)) ?
+ ((buffer_prefix_content->priv_data_size + OFFSET_UNITS) &
+ ~(u16)(OFFSET_UNITS - 1)) :
+ buffer_prefix_content->priv_data_size);
+
+ /* Translate margin and int_context params to FM parameters */
+ /* Initialize with illegal value. Later we'll set legal values. */
+ buffer_offsets->prs_result_offset = (u32)ILLEGAL_BASE;
+ buffer_offsets->time_stamp_offset = (u32)ILLEGAL_BASE;
+ buffer_offsets->hash_result_offset = (u32)ILLEGAL_BASE;
+
+	/* Internally the driver supports two copy layouts:
+	 * 1. A selection of prsResult/timestamp/hashResult (in fact 8
+	 *    combinations, but for simplicity we treat them as one case).
+	 * 2. The complete IC context (from AD), not including debug.
+	 */
+
+ /* This case covers the options under 1 */
+ /* Copy size must be in 16-byte granularity. */
+ int_context_data_copy->size =
+ (u16)((buffer_prefix_content->pass_prs_result ? 32 : 0) +
+ ((buffer_prefix_content->pass_time_stamp ||
+ buffer_prefix_content->pass_hash_result) ? 16 : 0));
+
+ /* Align start of internal context data to 16 byte */
+ int_context_data_copy->int_context_offset =
+ (u8)(buffer_prefix_content->pass_prs_result ? 32 :
+ ((buffer_prefix_content->pass_time_stamp ||
+ buffer_prefix_content->pass_hash_result) ? 64 : 0));
+
+ if (buffer_prefix_content->pass_prs_result)
+ buffer_offsets->prs_result_offset =
+ int_context_data_copy->ext_buf_offset;
+ if (buffer_prefix_content->pass_time_stamp)
+ buffer_offsets->time_stamp_offset =
+ buffer_prefix_content->pass_prs_result ?
+ (int_context_data_copy->ext_buf_offset +
+ sizeof(struct fman_prs_result)) :
+ int_context_data_copy->ext_buf_offset;
+ if (buffer_prefix_content->pass_hash_result)
+ /* If PR is not requested, whether TS is
+ * requested or not, IC will be copied from TS
+ */
+ buffer_offsets->hash_result_offset =
+ buffer_prefix_content->pass_prs_result ?
+ (int_context_data_copy->ext_buf_offset +
+ sizeof(struct fman_prs_result) + 8) :
+ int_context_data_copy->ext_buf_offset + 8;
+
+ if (int_context_data_copy->size)
+ buf_margins->start_margins =
+ (u16)(int_context_data_copy->ext_buf_offset +
+ int_context_data_copy->size);
+ else
+		/* No internal context passing: the start margin begins
+		 * immediately after the private data
+ */
+ buf_margins->start_margins =
+ buffer_prefix_content->priv_data_size;
+
+ /* align data start */
+ tmp = (u32)(buf_margins->start_margins %
+ buffer_prefix_content->data_align);
+ if (tmp)
+ buf_margins->start_margins +=
+ (buffer_prefix_content->data_align - tmp);
+ buffer_offsets->data_offset = buf_margins->start_margins;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.h b/drivers/net/ethernet/freescale/fman/fman_sp.h
new file mode 100644
index 0000000..820b7f6
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FM_SP_H
+#define __FM_SP_H
+
+#include "fman.h"
+#include <linux/types.h>
+
+#define ILLEGAL_BASE (~0)
+
+/* defaults */
+#define DFLT_FM_SP_BUFFER_PREFIX_CONTEXT_DATA_ALIGN 64
+
+/* Register bit fields */
+#define FMAN_SP_EXT_BUF_POOL_EN_COUNTER 0x40000000
+#define FMAN_SP_EXT_BUF_POOL_VALID 0x80000000
+#define FMAN_SP_EXT_BUF_POOL_BACKUP 0x20000000
+#define FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE 0x00100000
+#define FMAN_SP_SG_DISABLE 0x80000000
+
+/* shifts */
+#define FMAN_SP_EXT_BUF_MARG_START_SHIFT 16
+#define FMAN_SP_DMA_ATTR_SWP_SHIFT 30
+#define FMAN_SP_IC_TO_EXT_SHIFT 16
+#define FMAN_SP_IC_FROM_INT_SHIFT 8
+
+/* structure for defining internal context copying */
+struct fman_sp_int_context_data_copy {
+	/* Offset in the external buffer to which the internal
+	 * context is copied (Rx) or from which it is taken (Tx, Op).
+	 */
+ u16 ext_buf_offset;
+ /* Offset within internal context to copy from
+ * (Rx) or to copy to (Tx, Op).
+ */
+ u8 int_context_offset;
+	/* Size of the internal context to copy */
+ u16 size;
+};
+
+/* struct for defining external buffer margins */
+struct fman_sp_buf_margins {
+ /* Number of bytes to be left at the beginning
+ * of the external buffer (must be divisible by 16)
+ */
+ u16 start_margins;
+	/* Number of bytes to be left at the end
+	 * of the external buffer (must be divisible by 16)
+ */
+ u16 end_margins;
+};
+
+struct fman_sp_buffer_offsets {
+ u32 data_offset;
+ u32 prs_result_offset;
+ u32 time_stamp_offset;
+ u32 hash_result_offset;
+};
+
+int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy
+ *int_context_data_copy,
+ struct fman_buffer_prefix_content
+ *buffer_prefix_content,
+ struct fman_sp_buf_margins *buf_margins,
+ struct fman_sp_buffer_offsets
+ *buffer_offsets,
+ u8 *internal_buf_offset);
+
+void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
+ *fm_ext_pools,
+ u8 *ordered_array,
+ u16 *sizes_array);
+
+#endif /* __FM_SP_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
new file mode 100644
index 0000000..efabb04
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -0,0 +1,786 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "fman_tgec.h"
+#include "fman.h"
+
+#include <linux/slab.h>
+#include <linux/bitrev.h>
+#include <linux/io.h>
+#include <linux/crc32.h>
+
+/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */
+#define TGEC_TX_IPG_LENGTH_MASK 0x000003ff
+
+/* Command and Configuration Register (COMMAND_CONFIG) */
+#define CMD_CFG_NO_LEN_CHK 0x00020000
+#define CMD_CFG_PAUSE_IGNORE 0x00000100
+#define CMF_CFG_CRC_FWD 0x00000040
+#define CMD_CFG_PROMIS_EN 0x00000010
+#define CMD_CFG_RX_EN 0x00000002
+#define CMD_CFG_TX_EN 0x00000001
+
+/* Interrupt Mask Register (IMASK) */
+#define TGEC_IMASK_MDIO_SCAN_EVENT 0x00010000
+#define TGEC_IMASK_MDIO_CMD_CMPL 0x00008000
+#define TGEC_IMASK_REM_FAULT 0x00004000
+#define TGEC_IMASK_LOC_FAULT 0x00002000
+#define TGEC_IMASK_TX_ECC_ER 0x00001000
+#define TGEC_IMASK_TX_FIFO_UNFL 0x00000800
+#define TGEC_IMASK_TX_FIFO_OVFL 0x00000400
+#define TGEC_IMASK_TX_ER 0x00000200
+#define TGEC_IMASK_RX_FIFO_OVFL 0x00000100
+#define TGEC_IMASK_RX_ECC_ER 0x00000080
+#define TGEC_IMASK_RX_JAB_FRM 0x00000040
+#define TGEC_IMASK_RX_OVRSZ_FRM 0x00000020
+#define TGEC_IMASK_RX_RUNT_FRM 0x00000010
+#define TGEC_IMASK_RX_FRAG_FRM 0x00000008
+#define TGEC_IMASK_RX_LEN_ER 0x00000004
+#define TGEC_IMASK_RX_CRC_ER 0x00000002
+#define TGEC_IMASK_RX_ALIGN_ER 0x00000001
+
+/* Hashtable Control Register (HASHTABLE_CTRL) */
+#define TGEC_HASH_MCAST_SHIFT 23
+#define TGEC_HASH_MCAST_EN 0x00000200
+#define TGEC_HASH_ADR_MSK 0x000001ff
+
+#define DEFAULT_TX_IPG_LENGTH 12
+#define DEFAULT_MAX_FRAME_LENGTH 0x600
+#define DEFAULT_PAUSE_QUANT 0xf000
+
+/* number of pattern match registers (entries) */
+#define TGEC_NUM_OF_PADDRS 1
+
+/* Group address bit indication */
+#define GROUP_ADDRESS 0x0000010000000000LL
+
+/* Driver-side hash table size (9-bit hash index -> 512 buckets) */
+#define TGEC_HASH_TABLE_SIZE 512
+
+/* tGEC memory map */
+struct tgec_regs {
+ u32 tgec_id; /* 0x000 Controller ID */
+ u32 reserved001[1]; /* 0x004 */
+ u32 command_config; /* 0x008 Control and configuration */
+ u32 mac_addr_0; /* 0x00c Lower 32 bits of the MAC adr */
+ u32 mac_addr_1; /* 0x010 Upper 16 bits of the MAC adr */
+ u32 maxfrm; /* 0x014 Maximum frame length */
+ u32 pause_quant; /* 0x018 Pause quanta */
+ u32 rx_fifo_sections; /* 0x01c */
+ u32 tx_fifo_sections; /* 0x020 */
+ u32 rx_fifo_almost_f_e; /* 0x024 */
+ u32 tx_fifo_almost_f_e; /* 0x028 */
+ u32 hashtable_ctrl; /* 0x02c Hash table control */
+ u32 mdio_cfg_status; /* 0x030 */
+ u32 mdio_command; /* 0x034 */
+ u32 mdio_data; /* 0x038 */
+ u32 mdio_regaddr; /* 0x03c */
+ u32 status; /* 0x040 */
+ u32 tx_ipg_len; /* 0x044 Transmitter inter-packet-gap */
+ u32 mac_addr_2; /* 0x048 Lower 32 bits of 2nd MAC adr */
+ u32 mac_addr_3; /* 0x04c Upper 16 bits of 2nd MAC adr */
+ u32 rx_fifo_ptr_rd; /* 0x050 */
+ u32 rx_fifo_ptr_wr; /* 0x054 */
+ u32 tx_fifo_ptr_rd; /* 0x058 */
+ u32 tx_fifo_ptr_wr; /* 0x05c */
+ u32 imask; /* 0x060 Interrupt mask */
+ u32 ievent; /* 0x064 Interrupt event */
+ u32 udp_port; /* 0x068 Defines a UDP Port number */
+ u32 type_1588v2; /* 0x06c Type field for 1588v2 */
+ u32 reserved070[4]; /* 0x070 */
+ /* 10Ge Statistics Counter */
+ u32 tfrm_u; /* 80 aFramesTransmittedOK */
+ u32 tfrm_l; /* 84 aFramesTransmittedOK */
+ u32 rfrm_u; /* 88 aFramesReceivedOK */
+ u32 rfrm_l; /* 8c aFramesReceivedOK */
+ u32 rfcs_u; /* 90 aFrameCheckSequenceErrors */
+ u32 rfcs_l; /* 94 aFrameCheckSequenceErrors */
+ u32 raln_u; /* 98 aAlignmentErrors */
+ u32 raln_l; /* 9c aAlignmentErrors */
+ u32 txpf_u; /* A0 aPAUSEMACCtrlFramesTransmitted */
+ u32 txpf_l; /* A4 aPAUSEMACCtrlFramesTransmitted */
+ u32 rxpf_u; /* A8 aPAUSEMACCtrlFramesReceived */
+ u32 rxpf_l; /* Ac aPAUSEMACCtrlFramesReceived */
+ u32 rlong_u; /* B0 aFrameTooLongErrors */
+ u32 rlong_l; /* B4 aFrameTooLongErrors */
+ u32 rflr_u; /* B8 aInRangeLengthErrors */
+ u32 rflr_l; /* Bc aInRangeLengthErrors */
+ u32 tvlan_u; /* C0 VLANTransmittedOK */
+ u32 tvlan_l; /* C4 VLANTransmittedOK */
+ u32 rvlan_u; /* C8 VLANReceivedOK */
+ u32 rvlan_l; /* Cc VLANReceivedOK */
+ u32 toct_u; /* D0 if_out_octets */
+ u32 toct_l; /* D4 if_out_octets */
+ u32 roct_u; /* D8 if_in_octets */
+ u32 roct_l; /* Dc if_in_octets */
+ u32 ruca_u; /* E0 if_in_ucast_pkts */
+ u32 ruca_l; /* E4 if_in_ucast_pkts */
+ u32 rmca_u; /* E8 ifInMulticastPkts */
+ u32 rmca_l; /* Ec ifInMulticastPkts */
+ u32 rbca_u; /* F0 ifInBroadcastPkts */
+ u32 rbca_l; /* F4 ifInBroadcastPkts */
+ u32 terr_u; /* F8 if_out_errors */
+ u32 terr_l; /* Fc if_out_errors */
+ u32 reserved100[2]; /* 100-108 */
+ u32 tuca_u; /* 108 if_out_ucast_pkts */
+ u32 tuca_l; /* 10c if_out_ucast_pkts */
+ u32 tmca_u; /* 110 ifOutMulticastPkts */
+ u32 tmca_l; /* 114 ifOutMulticastPkts */
+ u32 tbca_u; /* 118 ifOutBroadcastPkts */
+ u32 tbca_l; /* 11c ifOutBroadcastPkts */
+ u32 rdrp_u; /* 120 etherStatsDropEvents */
+ u32 rdrp_l; /* 124 etherStatsDropEvents */
+ u32 reoct_u; /* 128 etherStatsOctets */
+ u32 reoct_l; /* 12c etherStatsOctets */
+ u32 rpkt_u; /* 130 etherStatsPkts */
+ u32 rpkt_l; /* 134 etherStatsPkts */
+ u32 trund_u; /* 138 etherStatsUndersizePkts */
+ u32 trund_l; /* 13c etherStatsUndersizePkts */
+ u32 r64_u; /* 140 etherStatsPkts64Octets */
+ u32 r64_l; /* 144 etherStatsPkts64Octets */
+ u32 r127_u; /* 148 etherStatsPkts65to127Octets */
+ u32 r127_l; /* 14c etherStatsPkts65to127Octets */
+ u32 r255_u; /* 150 etherStatsPkts128to255Octets */
+ u32 r255_l; /* 154 etherStatsPkts128to255Octets */
+ u32 r511_u; /* 158 etherStatsPkts256to511Octets */
+ u32 r511_l; /* 15c etherStatsPkts256to511Octets */
+ u32 r1023_u; /* 160 etherStatsPkts512to1023Octets */
+ u32 r1023_l; /* 164 etherStatsPkts512to1023Octets */
+ u32 r1518_u; /* 168 etherStatsPkts1024to1518Octets */
+ u32 r1518_l; /* 16c etherStatsPkts1024to1518Octets */
+ u32 r1519x_u; /* 170 etherStatsPkts1519toX */
+ u32 r1519x_l; /* 174 etherStatsPkts1519toX */
+ u32 trovr_u; /* 178 etherStatsOversizePkts */
+ u32 trovr_l; /* 17c etherStatsOversizePkts */
+ u32 trjbr_u; /* 180 etherStatsJabbers */
+ u32 trjbr_l; /* 184 etherStatsJabbers */
+ u32 trfrg_u; /* 188 etherStatsFragments */
+ u32 trfrg_l; /* 18C etherStatsFragments */
+ u32 rerr_u; /* 190 if_in_errors */
+ u32 rerr_l; /* 194 if_in_errors */
+};
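+
+/* Note (editor's): each statistics counter above appears to be 64 bits
+ * wide, split across an _u (upper 32 bits) and _l (lower 32 bits)
+ * register pair.
+ */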
+
+struct tgec_cfg {
+ bool pause_ignore;
+ bool promiscuous_mode_enable;
+ u16 max_frame_length;
+ u16 pause_quant;
+ u32 tx_ipg_length;
+};
+
+struct fman_mac {
+ /* Pointer to the memory mapped registers. */
+ struct tgec_regs __iomem *regs;
+ /* MAC address of device; */
+ u64 addr;
+ u16 max_speed;
+ void *dev_id; /* device cookie used by the exception cbs */
+ fman_mac_exception_cb *exception_cb;
+ fman_mac_exception_cb *event_cb;
+ /* pointer to driver's global address hash table */
+ struct eth_hash_t *multicast_addr_hash;
+ /* pointer to driver's individual address hash table */
+ struct eth_hash_t *unicast_addr_hash;
+ u8 mac_id;
+ u32 exceptions;
+ struct tgec_cfg *cfg;
+ void *fm;
+ struct fman_rev_info fm_rev_info;
+};
+
+static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr)
+{
+ u32 tmp0, tmp1;
+
+ tmp0 = (u32)(adr[0] | adr[1] << 8 | adr[2] << 16 | adr[3] << 24);
+ tmp1 = (u32)(adr[4] | adr[5] << 8);
+ iowrite32be(tmp0, &regs->mac_addr_0);
+ iowrite32be(tmp1, &regs->mac_addr_1);
+}
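+
+/* For example (illustrative values): the address 00:04:9f:01:02:03 is
+ * written as mac_addr_0 = 0x019f0400 and mac_addr_1 = 0x00000302.
+ */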
+
+static void set_dflts(struct tgec_cfg *cfg)
+{
+ cfg->promiscuous_mode_enable = false;
+ cfg->pause_ignore = false;
+ cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
+ cfg->max_frame_length = DEFAULT_MAX_FRAME_LENGTH;
+ cfg->pause_quant = DEFAULT_PAUSE_QUANT;
+}
+
+static int init(struct tgec_regs __iomem *regs, struct tgec_cfg *cfg,
+ u32 exception_mask)
+{
+ u32 tmp;
+
+ /* Config */
+ tmp = CMF_CFG_CRC_FWD;
+ if (cfg->promiscuous_mode_enable)
+ tmp |= CMD_CFG_PROMIS_EN;
+ if (cfg->pause_ignore)
+ tmp |= CMD_CFG_PAUSE_IGNORE;
+ /* Payload length check disable */
+ tmp |= CMD_CFG_NO_LEN_CHK;
+ iowrite32be(tmp, &regs->command_config);
+
+ /* Max Frame Length */
+ iowrite32be((u32)cfg->max_frame_length, &regs->maxfrm);
+ /* Pause Time */
+ iowrite32be(cfg->pause_quant, &regs->pause_quant);
+
+ /* clear all pending events and set-up interrupts */
+ iowrite32be(0xffffffff, &regs->ievent);
+ iowrite32be(ioread32be(&regs->imask) | exception_mask, &regs->imask);
+
+ return 0;
+}
+
+static int check_init_parameters(struct fman_mac *tgec)
+{
+ if (tgec->max_speed < SPEED_10000) {
+		pr_err("The 10G MAC driver only supports 10G speed\n");
+ return -EINVAL;
+ }
+ if (tgec->addr == 0) {
+		pr_err("The 10G MAC must have a valid MAC address\n");
+ return -EINVAL;
+ }
+ if (!tgec->exception_cb) {
+ pr_err("uninitialized exception_cb\n");
+ return -EINVAL;
+ }
+ if (!tgec->event_cb) {
+ pr_err("uninitialized event_cb\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_exception_flag(enum fman_mac_exceptions exception)
+{
+ u32 bit_mask;
+
+ switch (exception) {
+ case FM_MAC_EX_10G_MDIO_SCAN_EVENT:
+ bit_mask = TGEC_IMASK_MDIO_SCAN_EVENT;
+ break;
+ case FM_MAC_EX_10G_MDIO_CMD_CMPL:
+ bit_mask = TGEC_IMASK_MDIO_CMD_CMPL;
+ break;
+ case FM_MAC_EX_10G_REM_FAULT:
+ bit_mask = TGEC_IMASK_REM_FAULT;
+ break;
+ case FM_MAC_EX_10G_LOC_FAULT:
+ bit_mask = TGEC_IMASK_LOC_FAULT;
+ break;
+ case FM_MAC_EX_10G_TX_ECC_ER:
+ bit_mask = TGEC_IMASK_TX_ECC_ER;
+ break;
+ case FM_MAC_EX_10G_TX_FIFO_UNFL:
+ bit_mask = TGEC_IMASK_TX_FIFO_UNFL;
+ break;
+ case FM_MAC_EX_10G_TX_FIFO_OVFL:
+ bit_mask = TGEC_IMASK_TX_FIFO_OVFL;
+ break;
+ case FM_MAC_EX_10G_TX_ER:
+ bit_mask = TGEC_IMASK_TX_ER;
+ break;
+ case FM_MAC_EX_10G_RX_FIFO_OVFL:
+ bit_mask = TGEC_IMASK_RX_FIFO_OVFL;
+ break;
+ case FM_MAC_EX_10G_RX_ECC_ER:
+ bit_mask = TGEC_IMASK_RX_ECC_ER;
+ break;
+ case FM_MAC_EX_10G_RX_JAB_FRM:
+ bit_mask = TGEC_IMASK_RX_JAB_FRM;
+ break;
+ case FM_MAC_EX_10G_RX_OVRSZ_FRM:
+ bit_mask = TGEC_IMASK_RX_OVRSZ_FRM;
+ break;
+ case FM_MAC_EX_10G_RX_RUNT_FRM:
+ bit_mask = TGEC_IMASK_RX_RUNT_FRM;
+ break;
+ case FM_MAC_EX_10G_RX_FRAG_FRM:
+ bit_mask = TGEC_IMASK_RX_FRAG_FRM;
+ break;
+ case FM_MAC_EX_10G_RX_LEN_ER:
+ bit_mask = TGEC_IMASK_RX_LEN_ER;
+ break;
+ case FM_MAC_EX_10G_RX_CRC_ER:
+ bit_mask = TGEC_IMASK_RX_CRC_ER;
+ break;
+ case FM_MAC_EX_10G_RX_ALIGN_ER:
+ bit_mask = TGEC_IMASK_RX_ALIGN_ER;
+ break;
+ default:
+ bit_mask = 0;
+ break;
+ }
+
+ return bit_mask;
+}
+
+static void tgec_err_exception(void *handle)
+{
+ struct fman_mac *tgec = (struct fman_mac *)handle;
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 event;
+
+ /* do not handle MDIO events */
+ event = ioread32be(&regs->ievent) &
+ ~(TGEC_IMASK_MDIO_SCAN_EVENT |
+ TGEC_IMASK_MDIO_CMD_CMPL);
+
+ event &= ioread32be(&regs->imask);
+
+ iowrite32be(event, &regs->ievent);
+
+ if (event & TGEC_IMASK_REM_FAULT)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_REM_FAULT);
+ if (event & TGEC_IMASK_LOC_FAULT)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_LOC_FAULT);
+ if (event & TGEC_IMASK_TX_ECC_ER)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_ECC_ER);
+ if (event & TGEC_IMASK_TX_FIFO_UNFL)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_FIFO_UNFL);
+ if (event & TGEC_IMASK_TX_FIFO_OVFL)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_FIFO_OVFL);
+ if (event & TGEC_IMASK_TX_ER)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_ER);
+ if (event & TGEC_IMASK_RX_FIFO_OVFL)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_FIFO_OVFL);
+ if (event & TGEC_IMASK_RX_ECC_ER)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_ECC_ER);
+ if (event & TGEC_IMASK_RX_JAB_FRM)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_JAB_FRM);
+ if (event & TGEC_IMASK_RX_OVRSZ_FRM)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_OVRSZ_FRM);
+ if (event & TGEC_IMASK_RX_RUNT_FRM)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_RUNT_FRM);
+ if (event & TGEC_IMASK_RX_FRAG_FRM)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_FRAG_FRM);
+ if (event & TGEC_IMASK_RX_LEN_ER)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_LEN_ER);
+ if (event & TGEC_IMASK_RX_CRC_ER)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_CRC_ER);
+ if (event & TGEC_IMASK_RX_ALIGN_ER)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_ALIGN_ER);
+}
+
+static void free_init_resources(struct fman_mac *tgec)
+{
+ fman_unregister_intr(tgec->fm, FMAN_MOD_MAC, tgec->mac_id,
+ FMAN_INTR_TYPE_ERR);
+
+ /* release the driver's group hash table */
+ free_hash_table(tgec->multicast_addr_hash);
+ tgec->multicast_addr_hash = NULL;
+
+ /* release the driver's individual hash table */
+ free_hash_table(tgec->unicast_addr_hash);
+ tgec->unicast_addr_hash = NULL;
+}
+
+static bool is_init_done(struct tgec_cfg *cfg)
+{
+	/* cfg is freed at the end of tgec_init(), so a NULL cfg means
+	 * initialization has completed
+	 */
+ if (!cfg)
+ return true;
+
+ return false;
+}
+
+int tgec_enable(struct fman_mac *tgec, enum comm_mode mode)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (mode & COMM_MODE_RX)
+ tmp |= CMD_CFG_RX_EN;
+ if (mode & COMM_MODE_TX)
+ tmp |= CMD_CFG_TX_EN;
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+int tgec_disable(struct fman_mac *tgec, enum comm_mode mode)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (mode & COMM_MODE_RX)
+ tmp &= ~CMD_CFG_RX_EN;
+ if (mode & COMM_MODE_TX)
+ tmp &= ~CMD_CFG_TX_EN;
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (new_val)
+ tmp |= CMD_CFG_PROMIS_EN;
+ else
+ tmp &= ~CMD_CFG_PROMIS_EN;
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val)
+{
+ if (is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tgec->cfg->max_frame_length = new_val;
+
+ return 0;
+}
+
+int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 __maybe_unused priority,
+ u16 pause_time, u16 __maybe_unused thresh_time)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ iowrite32be((u32)pause_time, &regs->pause_quant);
+
+ return 0;
+}
+
+int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (!en)
+ tmp |= CMD_CFG_PAUSE_IGNORE;
+ else
+ tmp &= ~CMD_CFG_PAUSE_IGNORE;
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *p_enet_addr)
+{
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr);
+ set_mac_address(tgec->regs, (u8 *)(*p_enet_addr));
+
+ return 0;
+}
+
+int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ struct eth_hash_entry *hash_entry;
+ u32 crc = 0xFFFFFFFF, hash;
+ u64 addr;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
+ if (!(addr & GROUP_ADDRESS)) {
+ /* Unicast addresses not supported in hash */
+ pr_err("Unicast Address\n");
+ return -EINVAL;
+ }
+ /* CRC calculation */
+ crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
+ crc = bitrev32(crc);
+	/* Take the 9 most significant bits */
+ hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
+
+ /* Create element to be added to the driver hash table */
+ hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+ if (!hash_entry)
+ return -ENOMEM;
+ hash_entry->addr = addr;
+ INIT_LIST_HEAD(&hash_entry->node);
+
+ list_add_tail(&hash_entry->node,
+ &tgec->multicast_addr_hash->lsts[hash]);
+ iowrite32be((hash | TGEC_HASH_MCAST_EN), &regs->hashtable_ctrl);
+
+ return 0;
+}
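+
+/* Sketch (editor's illustration, not driver code) of the bucket
+ * selection used above, written standalone:
+ *
+ *	u32 crc = bitrev32(crc32_le(0xFFFFFFFF, (u8 *)eth_addr, ETH_ALEN));
+ *	u32 hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
+ *
+ * i.e. the 9 most significant bits of the bit-reversed CRC-32 select
+ * one of the 512 hash-table buckets.
+ */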
+
+int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ struct eth_hash_entry *hash_entry = NULL;
+ struct list_head *pos;
+ u32 crc = 0xFFFFFFFF, hash;
+ u64 addr;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ addr = ((*(u64 *)eth_addr) >> 16);
+
+ /* CRC calculation */
+ crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
+ crc = bitrev32(crc);
+	/* Take the 9 most significant bits */
+ hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
+
+ list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) {
+ hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+ if (hash_entry->addr == addr) {
+ list_del_init(&hash_entry->node);
+ kfree(hash_entry);
+ break;
+ }
+ }
+ if (list_empty(&tgec->multicast_addr_hash->lsts[hash]))
+ iowrite32be((hash & ~TGEC_HASH_MCAST_EN),
+ &regs->hashtable_ctrl);
+
+ return 0;
+}
+
+int tgec_get_version(struct fman_mac *tgec, u32 *mac_version)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ *mac_version = ioread32be(&regs->tgec_id);
+
+ return 0;
+}
+
+int tgec_set_exception(struct fman_mac *tgec,
+ enum fman_mac_exceptions exception, bool enable)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 bit_mask = 0;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ bit_mask = get_exception_flag(exception);
+ if (bit_mask) {
+ if (enable)
+ tgec->exceptions |= bit_mask;
+ else
+ tgec->exceptions &= ~bit_mask;
+ } else {
+ pr_err("Undefined exception\n");
+ return -EINVAL;
+ }
+ if (enable)
+ iowrite32be(ioread32be(&regs->imask) | bit_mask, &regs->imask);
+ else
+ iowrite32be(ioread32be(&regs->imask) & ~bit_mask, &regs->imask);
+
+ return 0;
+}
+
+int tgec_init(struct fman_mac *tgec)
+{
+ struct tgec_cfg *cfg;
+ enet_addr_t eth_addr;
+ int err;
+
+ if (is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ if (DEFAULT_RESET_ON_INIT &&
+ (fman_reset_mac(tgec->fm, tgec->mac_id) != 0)) {
+ pr_err("Can't reset MAC!\n");
+ return -EINVAL;
+ }
+
+ err = check_init_parameters(tgec);
+ if (err)
+ return err;
+
+ cfg = tgec->cfg;
+
+ MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr);
+ set_mac_address(tgec->regs, (u8 *)eth_addr);
+
+ /* interrupts */
+ /* FM_10G_REM_N_LCL_FLT_EX_10GMAC_ERRATA_SW005 Errata workaround */
+ if (tgec->fm_rev_info.major <= 2)
+ tgec->exceptions &= ~(TGEC_IMASK_REM_FAULT |
+ TGEC_IMASK_LOC_FAULT);
+
+ err = init(tgec->regs, cfg, tgec->exceptions);
+ if (err) {
+ free_init_resources(tgec);
+ pr_err("TGEC version doesn't support this i/f mode\n");
+ return err;
+ }
+
+ /* Max Frame Length */
+ err = fman_set_mac_max_frame(tgec->fm, tgec->mac_id,
+ cfg->max_frame_length);
+ if (err) {
+		pr_err("Setting max frame length failed\n");
+ free_init_resources(tgec);
+ return -EINVAL;
+ }
+
+ /* FM_TX_FIFO_CORRUPTION_ERRATA_10GMAC_A007 Errata workaround */
+ if (tgec->fm_rev_info.major == 2) {
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+		/* Restore the default Tx IPG length */
+		tmp = (ioread32be(&regs->tx_ipg_len) &
+		       ~TGEC_TX_IPG_LENGTH_MASK) | DEFAULT_TX_IPG_LENGTH;
+
+ iowrite32be(tmp, &regs->tx_ipg_len);
+ }
+
+ tgec->multicast_addr_hash = alloc_hash_table(TGEC_HASH_TABLE_SIZE);
+ if (!tgec->multicast_addr_hash) {
+ free_init_resources(tgec);
+		pr_err("multicast hash table allocation failed\n");
+ return -ENOMEM;
+ }
+
+ tgec->unicast_addr_hash = alloc_hash_table(TGEC_HASH_TABLE_SIZE);
+ if (!tgec->unicast_addr_hash) {
+ free_init_resources(tgec);
+		pr_err("unicast hash table allocation failed\n");
+ return -ENOMEM;
+ }
+
+ fman_register_intr(tgec->fm, FMAN_MOD_MAC, tgec->mac_id,
+ FMAN_INTR_TYPE_ERR, tgec_err_exception, tgec);
+
+ kfree(cfg);
+ tgec->cfg = NULL;
+
+ return 0;
+}
+
+int tgec_free(struct fman_mac *tgec)
+{
+ free_init_resources(tgec);
+
+	kfree(tgec->cfg);
+ kfree(tgec);
+
+ return 0;
+}
+
+struct fman_mac *tgec_config(struct fman_mac_params *params)
+{
+ struct fman_mac *tgec;
+ struct tgec_cfg *cfg;
+ void __iomem *base_addr;
+
+ base_addr = params->base_addr;
+	/* allocate memory for the tGEC data structure */
+ tgec = kzalloc(sizeof(*tgec), GFP_KERNEL);
+ if (!tgec)
+ return NULL;
+
+ /* allocate memory for the 10G MAC driver parameters data structure. */
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg) {
+ tgec_free(tgec);
+ return NULL;
+ }
+
+ /* Plant parameter structure pointer */
+ tgec->cfg = cfg;
+
+ set_dflts(cfg);
+
+ tgec->regs = base_addr;
+ tgec->addr = ENET_ADDR_TO_UINT64(params->addr);
+ tgec->max_speed = params->max_speed;
+ tgec->mac_id = params->mac_id;
+ tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT |
+ TGEC_IMASK_REM_FAULT |
+ TGEC_IMASK_LOC_FAULT |
+ TGEC_IMASK_TX_ECC_ER |
+ TGEC_IMASK_TX_FIFO_UNFL |
+ TGEC_IMASK_TX_FIFO_OVFL |
+ TGEC_IMASK_TX_ER |
+ TGEC_IMASK_RX_FIFO_OVFL |
+ TGEC_IMASK_RX_ECC_ER |
+ TGEC_IMASK_RX_JAB_FRM |
+ TGEC_IMASK_RX_OVRSZ_FRM |
+ TGEC_IMASK_RX_RUNT_FRM |
+ TGEC_IMASK_RX_FRAG_FRM |
+ TGEC_IMASK_RX_CRC_ER |
+ TGEC_IMASK_RX_ALIGN_ER);
+ tgec->exception_cb = params->exception_cb;
+ tgec->event_cb = params->event_cb;
+ tgec->dev_id = params->dev_id;
+ tgec->fm = params->fm;
+
+ /* Save FMan revision */
+ fman_get_revision(tgec->fm, &tgec->fm_rev_info);
+
+ return tgec;
+}
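+
+/* Typical lifecycle (illustrative; tgec_initialization() in mac.c is
+ * the in-tree caller):
+ *
+ *	tgec = tgec_config(&params);
+ *	err = tgec_cfg_max_frame_len(tgec, max_frm);
+ *	err = tgec_init(tgec);  (frees tgec->cfg on success)
+ *	...
+ *	tgec_free(tgec);
+ */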
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
new file mode 100644
index 0000000..514bba9
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __TGEC_H
+#define __TGEC_H
+
+#include "fman_mac.h"
+
+struct fman_mac *tgec_config(struct fman_mac_params *params);
+int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val);
+int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *enet_addr);
+int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val);
+int tgec_enable(struct fman_mac *tgec, enum comm_mode mode);
+int tgec_disable(struct fman_mac *tgec, enum comm_mode mode);
+int tgec_init(struct fman_mac *tgec);
+int tgec_free(struct fman_mac *tgec);
+int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en);
+int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 priority,
+ u16 pause_time, u16 thresh_time);
+int tgec_set_exception(struct fman_mac *tgec,
+ enum fman_mac_exceptions exception, bool enable);
+int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
+int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
+int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
+
+#endif /* __TGEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
new file mode 100644
index 0000000..743a393
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -0,0 +1,978 @@
+/* Copyright 2008-2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/device.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+#include <linux/phy_fixed.h>
+#include <linux/etherdevice.h>
+#include <linux/libfdt_env.h>
+
+#include "mac.h"
+#include "fman_mac.h"
+#include "fman_dtsec.h"
+#include "fman_tgec.h"
+#include "fman_memac.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL FMan MAC API based driver");
+
+struct mac_priv_s {
+ struct device *dev;
+ void __iomem *vaddr;
+ u8 cell_index;
+ phy_interface_t phy_if;
+ struct fman *fman;
+ struct device_node *phy_node;
+ struct device_node *internal_phy_node;
+ /* List of multicast addresses */
+ struct list_head mc_addr_list;
+ struct platform_device *eth_dev;
+ struct fixed_phy_status *fixed_link;
+ u16 speed;
+ u16 max_speed;
+
+ int (*enable)(struct fman_mac *mac_dev, enum comm_mode mode);
+ int (*disable)(struct fman_mac *mac_dev, enum comm_mode mode);
+};
+
+struct mac_address {
+ u8 addr[ETH_ALEN];
+ struct list_head list;
+};
+
+static void mac_exception(void *handle, enum fman_mac_exceptions ex)
+{
+ struct mac_device *mac_dev;
+ struct mac_priv_s *priv;
+
+ mac_dev = handle;
+ priv = mac_dev->priv;
+
+ if (ex == FM_MAC_EX_10G_RX_FIFO_OVFL) {
+ /* don't flag RX FIFO after the first */
+ mac_dev->set_exception(mac_dev->fman_mac,
+ FM_MAC_EX_10G_RX_FIFO_OVFL, false);
+ dev_err(priv->dev, "10G MAC got RX FIFO Error = %x\n", ex);
+ }
+
+ dev_dbg(priv->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
+ __func__, ex);
+}
+
+static void set_fman_mac_params(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
+{
+ struct mac_priv_s *priv = mac_dev->priv;
+
+ params->base_addr = (typeof(params->base_addr))
+ devm_ioremap(priv->dev, mac_dev->res->start,
+ resource_size(mac_dev->res));
+ memcpy(&params->addr, mac_dev->addr, sizeof(mac_dev->addr));
+ params->max_speed = priv->max_speed;
+ params->phy_if = priv->phy_if;
+ params->basex_if = false;
+ params->mac_id = priv->cell_index;
+ params->fm = (void *)priv->fman;
+ params->exception_cb = mac_exception;
+ params->event_cb = mac_exception;
+ params->dev_id = mac_dev;
+ params->internal_phy_node = priv->internal_phy_node;
+}
+
+static int tgec_initialization(struct mac_device *mac_dev)
+{
+ int err;
+ struct mac_priv_s *priv;
+ struct fman_mac_params params;
+ u32 version;
+
+ priv = mac_dev->priv;
+
+ set_fman_mac_params(mac_dev, &params);
+
+ mac_dev->fman_mac = tgec_config(&params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ err = tgec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ err = tgec_init(mac_dev->fman_mac);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ /* For 10G MAC, disable Tx ECC exception */
+ err = mac_dev->set_exception(mac_dev->fman_mac,
+ FM_MAC_EX_10G_TX_ECC_ER, false);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ err = tgec_get_version(mac_dev->fman_mac, &version);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ dev_info(priv->dev, "FMan XGEC version: 0x%08x\n", version);
+
+ goto _return;
+
+_return_fm_mac_free:
+ tgec_free(mac_dev->fman_mac);
+
+_return:
+ return err;
+}
+
+static int dtsec_initialization(struct mac_device *mac_dev)
+{
+ int err;
+ struct mac_priv_s *priv;
+ struct fman_mac_params params;
+ u32 version;
+
+ priv = mac_dev->priv;
+
+ set_fman_mac_params(mac_dev, &params);
+
+ mac_dev->fman_mac = dtsec_config(&params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ err = dtsec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ err = dtsec_cfg_pad_and_crc(mac_dev->fman_mac, true);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ err = dtsec_init(mac_dev->fman_mac);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+	/* For the 1G MAC, disable the MIB counters overflow interrupt by default */
+ err = mac_dev->set_exception(mac_dev->fman_mac,
+ FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ err = dtsec_get_version(mac_dev->fman_mac, &version);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ dev_info(priv->dev, "FMan dTSEC version: 0x%08x\n", version);
+
+ goto _return;
+
+_return_fm_mac_free:
+ dtsec_free(mac_dev->fman_mac);
+
+_return:
+ return err;
+}
+
+static int memac_initialization(struct mac_device *mac_dev)
+{
+ int err;
+ struct mac_priv_s *priv;
+ struct fman_mac_params params;
+
+ priv = mac_dev->priv;
+
+ set_fman_mac_params(mac_dev, &params);
+
+ if (priv->max_speed == SPEED_10000)
+ params.phy_if = PHY_INTERFACE_MODE_XGMII;
+
+ mac_dev->fman_mac = memac_config(&params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ err = memac_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ err = memac_cfg_reset_on_init(mac_dev->fman_mac, true);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ err = memac_cfg_fixed_link(mac_dev->fman_mac, priv->fixed_link);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ err = memac_init(mac_dev->fman_mac);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ dev_info(priv->dev, "FMan MEMAC\n");
+
+ goto _return;
+
+_return_fm_mac_free:
+ memac_free(mac_dev->fman_mac);
+
+_return:
+ return err;
+}
+
+static int start(struct mac_device *mac_dev)
+{
+ int err;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ struct mac_priv_s *priv = mac_dev->priv;
+
+ err = priv->enable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
+ if (!err && phy_dev)
+ phy_start(phy_dev);
+
+ return err;
+}
+
+static int stop(struct mac_device *mac_dev)
+{
+ struct mac_priv_s *priv = mac_dev->priv;
+
+ if (mac_dev->phy_dev)
+ phy_stop(mac_dev->phy_dev);
+
+ return priv->disable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
+}
+
+static int set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
+{
+ struct mac_priv_s *priv;
+ struct mac_address *old_addr, *tmp;
+ struct netdev_hw_addr *ha;
+ int err;
+ enet_addr_t *addr;
+
+ priv = mac_dev->priv;
+
+ /* Clear previous address list */
+ list_for_each_entry_safe(old_addr, tmp, &priv->mc_addr_list, list) {
+ addr = (enet_addr_t *)old_addr->addr;
+ err = mac_dev->remove_hash_mac_addr(mac_dev->fman_mac, addr);
+ if (err < 0)
+ return err;
+
+ list_del(&old_addr->list);
+ kfree(old_addr);
+ }
+
+ /* Add all the addresses from the new list */
+ netdev_for_each_mc_addr(ha, net_dev) {
+ addr = (enet_addr_t *)ha->addr;
+ err = mac_dev->add_hash_mac_addr(mac_dev->fman_mac, addr);
+ if (err < 0)
+ return err;
+
+ tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
+ if (!tmp)
+ return -ENOMEM;
+
+ ether_addr_copy(tmp->addr, ha->addr);
+ list_add(&tmp->list, &priv->mc_addr_list);
+ }
+ return 0;
+}
+
+/**
+ * fman_set_mac_active_pause
+ * @mac_dev: A pointer to the MAC device
+ * @rx: Pause frame setting for RX
+ * @tx: Pause frame setting for TX
+ *
+ * Set the MAC RX/TX PAUSE frames settings
+ *
+ * Avoid redundant calls to FMD if the MAC driver already contains the desired
+ * active PAUSE settings. Otherwise, the new active settings should be reflected
+ * in FMan.
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
+{
+ struct fman_mac *fman_mac = mac_dev->fman_mac;
+ int err = 0;
+
+ if (rx != mac_dev->rx_pause_active) {
+ err = mac_dev->set_rx_pause(fman_mac, rx);
+ if (likely(err == 0))
+ mac_dev->rx_pause_active = rx;
+ }
+
+ if (tx != mac_dev->tx_pause_active) {
+ u16 pause_time = (tx ? FSL_FM_PAUSE_TIME_ENABLE :
+ FSL_FM_PAUSE_TIME_DISABLE);
+
+ err = mac_dev->set_tx_pause(fman_mac, 0, pause_time, 0);
+
+ if (likely(err == 0))
+ mac_dev->tx_pause_active = tx;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(fman_set_mac_active_pause);
+
+/**
+ * fman_get_pause_cfg
+ * @mac_dev: A pointer to the MAC device
+ * @rx_pause: Return value for the RX PAUSE setting
+ * @tx_pause: Return value for the TX PAUSE setting
+ *
+ * Determine the MAC RX/TX PAUSE frames settings based on PHY
+ * autonegotiation or values set by ethtool.
+ *
+ * Return: none
+ */
+void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
+ bool *tx_pause)
+{
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ u16 lcl_adv, rmt_adv;
+ u8 flowctrl;
+
+ *rx_pause = *tx_pause = false;
+
+ if (!phy_dev->duplex)
+ return;
+
+ /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
+ * are those set by ethtool.
+ */
+ if (!mac_dev->autoneg_pause) {
+ *rx_pause = mac_dev->rx_pause_req;
+ *tx_pause = mac_dev->tx_pause_req;
+ return;
+ }
+
+ /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
+ * settings depend on the result of the link negotiation.
+ */
+
+ /* get local capabilities */
+ lcl_adv = 0;
+ if (phy_dev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (phy_dev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ /* get link partner capabilities */
+ rmt_adv = 0;
+ if (phy_dev->pause)
+ rmt_adv |= LPA_PAUSE_CAP;
+ if (phy_dev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ /* Calculate TX/RX settings based on local and peer advertised
+ * symmetric/asymmetric PAUSE capabilities.
+ */
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+ if (flowctrl & FLOW_CTRL_RX)
+ *rx_pause = true;
+ if (flowctrl & FLOW_CTRL_TX)
+ *tx_pause = true;
+}
+EXPORT_SYMBOL(fman_get_pause_cfg);
+
+static void adjust_link_void(struct net_device *net_dev)
+{
+}
+
+static void adjust_link_dtsec(struct net_device *net_dev)
+{
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa_eth_data *eth_data = dev->platform_data;
+ struct mac_device *mac_dev = eth_data->mac_dev;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ struct fman_mac *fman_mac;
+ bool rx_pause, tx_pause;
+ int err;
+
+ fman_mac = mac_dev->fman_mac;
+ if (!phy_dev->link) {
+ dtsec_restart_autoneg(fman_mac);
+
+ return;
+ }
+
+ dtsec_adjust_link(fman_mac, phy_dev->speed);
+ fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ if (err < 0)
+ netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err);
+}
+
+static void adjust_link_memac(struct net_device *net_dev)
+{
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa_eth_data *eth_data = dev->platform_data;
+ struct mac_device *mac_dev = eth_data->mac_dev;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ struct fman_mac *fman_mac;
+ bool rx_pause, tx_pause;
+ int err;
+
+ fman_mac = mac_dev->fman_mac;
+ memac_adjust_link(fman_mac, phy_dev->speed);
+
+ fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ if (err < 0)
+ netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err);
+}
+
+/* Initializes driver's PHY state, and attaches to the PHY.
+ * Returns 0 on success.
+ */
+static int init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev,
+ void (*adj_lnk)(struct net_device *))
+{
+ struct phy_device *phy_dev;
+ struct mac_priv_s *priv = mac_dev->priv;
+
+ phy_dev = of_phy_connect(net_dev, priv->phy_node, adj_lnk, 0,
+ priv->phy_if);
+ if (!phy_dev) {
+ netdev_err(net_dev, "Could not connect to PHY\n");
+ return -ENODEV;
+ }
+
+ /* Remove any features not supported by the controller */
+ phy_dev->supported &= mac_dev->if_support;
+ /* Enable the symmetric and asymmetric PAUSE frame advertisements,
+ * as most of the PHY drivers do not enable them by default.
+ */
+ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ phy_dev->advertising = phy_dev->supported;
+
+ mac_dev->phy_dev = phy_dev;
+
+ return 0;
+}
+
+static int dtsec_init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev)
+{
+ return init_phy(net_dev, mac_dev, &adjust_link_dtsec);
+}
+
+static int tgec_init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev)
+{
+ return init_phy(net_dev, mac_dev, adjust_link_void);
+}
+
+static int memac_init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev)
+{
+ return init_phy(net_dev, mac_dev, &adjust_link_memac);
+}
+
+static void setup_dtsec(struct mac_device *mac_dev)
+{
+ mac_dev->init_phy = dtsec_init_phy;
+ mac_dev->init = dtsec_initialization;
+ mac_dev->set_promisc = dtsec_set_promiscuous;
+ mac_dev->change_addr = dtsec_modify_mac_address;
+ mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address;
+ mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
+ mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
+ mac_dev->set_exception = dtsec_set_exception;
+ mac_dev->set_multi = set_multi;
+ mac_dev->start = start;
+ mac_dev->stop = stop;
+
+ mac_dev->priv->enable = dtsec_enable;
+ mac_dev->priv->disable = dtsec_disable;
+}
+
+static void setup_tgec(struct mac_device *mac_dev)
+{
+ mac_dev->init_phy = tgec_init_phy;
+ mac_dev->init = tgec_initialization;
+ mac_dev->set_promisc = tgec_set_promiscuous;
+ mac_dev->change_addr = tgec_modify_mac_address;
+ mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address;
+ mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
+ mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
+ mac_dev->set_exception = tgec_set_exception;
+ mac_dev->set_multi = set_multi;
+ mac_dev->start = start;
+ mac_dev->stop = stop;
+
+ mac_dev->priv->enable = tgec_enable;
+ mac_dev->priv->disable = tgec_disable;
+}
+
+static void setup_memac(struct mac_device *mac_dev)
+{
+ mac_dev->init_phy = memac_init_phy;
+ mac_dev->init = memac_initialization;
+ mac_dev->set_promisc = memac_set_promiscuous;
+ mac_dev->change_addr = memac_modify_mac_address;
+ mac_dev->add_hash_mac_addr = memac_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address;
+ mac_dev->set_tx_pause = memac_set_tx_pause_frames;
+ mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
+ mac_dev->set_exception = memac_set_exception;
+ mac_dev->set_multi = set_multi;
+ mac_dev->start = start;
+ mac_dev->stop = stop;
+
+ mac_dev->priv->enable = memac_enable;
+ mac_dev->priv->disable = memac_disable;
+}
+
+#define DTSEC_SUPPORTED \
+ (SUPPORTED_10baseT_Half \
+ | SUPPORTED_10baseT_Full \
+ | SUPPORTED_100baseT_Half \
+ | SUPPORTED_100baseT_Full \
+ | SUPPORTED_Autoneg \
+ | SUPPORTED_Pause \
+ | SUPPORTED_Asym_Pause \
+ | SUPPORTED_MII)
+
+static DEFINE_MUTEX(eth_lock);
+
+static const char phy_str[][11] = {
+ [PHY_INTERFACE_MODE_MII] = "mii",
+ [PHY_INTERFACE_MODE_GMII] = "gmii",
+ [PHY_INTERFACE_MODE_SGMII] = "sgmii",
+ [PHY_INTERFACE_MODE_TBI] = "tbi",
+ [PHY_INTERFACE_MODE_RMII] = "rmii",
+ [PHY_INTERFACE_MODE_RGMII] = "rgmii",
+ [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
+ [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
+ [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
+ [PHY_INTERFACE_MODE_RTBI] = "rtbi",
+ [PHY_INTERFACE_MODE_XGMII] = "xgmii"
+};
+
+static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(phy_str); i++)
+ if (strcmp(str, phy_str[i]) == 0)
+ return (phy_interface_t)i;
+
+ return PHY_INTERFACE_MODE_MII;
+}
+
+static const u16 phy2speed[] = {
+ [PHY_INTERFACE_MODE_MII] = SPEED_100,
+ [PHY_INTERFACE_MODE_GMII] = SPEED_1000,
+ [PHY_INTERFACE_MODE_SGMII] = SPEED_1000,
+ [PHY_INTERFACE_MODE_TBI] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RMII] = SPEED_100,
+ [PHY_INTERFACE_MODE_RGMII] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
+ [PHY_INTERFACE_MODE_XGMII] = SPEED_10000
+};
+
+static struct platform_device *dpaa_eth_add_device(int fman_id,
+ struct mac_device *mac_dev,
+ struct device_node *node)
+{
+ struct platform_device *pdev;
+ struct dpaa_eth_data data;
+ struct mac_priv_s *priv;
+ static int dpaa_eth_dev_cnt;
+ int ret;
+
+ priv = mac_dev->priv;
+
+ data.mac_dev = mac_dev;
+ data.mac_hw_id = priv->cell_index;
+ data.fman_hw_id = fman_id;
+ data.mac_node = node;
+
+ mutex_lock(&eth_lock);
+
+ pdev = platform_device_alloc("dpaa-ethernet", dpaa_eth_dev_cnt);
+ if (!pdev) {
+ ret = -ENOMEM;
+ goto no_mem;
+ }
+
+ ret = platform_device_add_data(pdev, &data, sizeof(data));
+ if (ret)
+ goto err;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto err;
+
+ dpaa_eth_dev_cnt++;
+ mutex_unlock(&eth_lock);
+
+ return pdev;
+
+err:
+ platform_device_put(pdev);
+no_mem:
+ mutex_unlock(&eth_lock);
+
+ return ERR_PTR(ret);
+}
+
+static const struct of_device_id mac_match[] = {
+ { .compatible = "fsl,fman-dtsec" },
+ { .compatible = "fsl,fman-xgec" },
+ { .compatible = "fsl,fman-memac" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mac_match);
+
+static int mac_probe(struct platform_device *_of_dev)
+{
+ int err, i, lenp, nph;
+ struct device *dev;
+ struct device_node *mac_node, *dev_node;
+ struct mac_device *mac_dev;
+ struct platform_device *of_dev;
+ struct resource res;
+ struct mac_priv_s *priv;
+ const u8 *mac_addr;
+ const char *char_prop;
+ const u32 *u32_prop;
+ u8 fman_id;
+
+ dev = &_of_dev->dev;
+ mac_node = dev->of_node;
+
+ mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL);
+ if (!mac_dev) {
+ err = -ENOMEM;
+ dev_err(dev, "devm_kzalloc() = %d\n", err);
+ goto _return;
+ }
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ err = -ENOMEM;
+ goto _return;
+ }
+
+ /* Save private information */
+ mac_dev->priv = priv;
+ priv->dev = dev;
+
+ if (of_device_is_compatible(mac_node, "fsl,fman-dtsec")) {
+ setup_dtsec(mac_dev);
+ priv->internal_phy_node = of_parse_phandle(mac_node,
+ "tbi-handle", 0);
+ } else if (of_device_is_compatible(mac_node, "fsl,fman-xgec")) {
+ setup_tgec(mac_dev);
+ } else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
+ setup_memac(mac_dev);
+ priv->internal_phy_node = of_parse_phandle(mac_node,
+ "pcsphy-handle", 0);
+ } else {
+ dev_err(dev, "MAC node (%s) contains unsupported MAC\n",
+ mac_node->full_name);
+ err = -EINVAL;
+ goto _return;
+ }
+
+ /* Register mac_dev */
+ dev_set_drvdata(dev, mac_dev);
+
+ INIT_LIST_HEAD(&priv->mc_addr_list);
+
+ /* Get the FM node */
+ dev_node = of_get_parent(mac_node);
+ if (!dev_node) {
+ dev_err(dev, "of_get_parent(%s) failed\n",
+ mac_node->full_name);
+ err = -EINVAL;
+ goto _return_dev_set_drvdata;
+ }
+
+ of_dev = of_find_device_by_node(dev_node);
+ if (!of_dev) {
+ dev_err(dev, "of_find_device_by_node(%s) failed\n",
+ dev_node->full_name);
+ err = -EINVAL;
+ goto _return_of_node_put;
+ }
+
+ /* Get the FMan cell-index */
+ u32_prop = of_get_property(dev_node, "cell-index", &lenp);
+ if (!u32_prop) {
+ dev_err(dev, "of_get_property(%s, cell-index) failed\n",
+ dev_node->full_name);
+ err = -EINVAL;
+ goto _return_of_node_put;
+ }
+ WARN_ON(lenp != sizeof(u32));
+ /* cell-index 0 => FMan id 1 */
+ fman_id = (u8)(fdt32_to_cpu(u32_prop[0]) + 1);
+
+ priv->fman = fman_bind(&of_dev->dev);
+ if (!priv->fman) {
+ dev_err(dev, "fman_bind(%s) failed\n", dev_node->full_name);
+ err = -ENODEV;
+ goto _return_of_node_put;
+ }
+
+ of_node_put(dev_node);
+
+ /* Get the address of the memory mapped registers */
+ err = of_address_to_resource(mac_node, 0, &res);
+ if (err < 0) {
+ dev_err(dev, "of_address_to_resource(%s) = %d\n",
+ mac_node->full_name, err);
+ goto _return_dev_set_drvdata;
+ }
+
+ mac_dev->res = __devm_request_region(dev,
+ fman_get_mem_region(priv->fman),
+ res.start, res.end + 1 - res.start,
+ "mac");
+ if (!mac_dev->res) {
+		dev_err(dev, "__devm_request_region(mac) failed\n");
+ err = -EBUSY;
+ goto _return_dev_set_drvdata;
+ }
+
+ priv->vaddr = devm_ioremap(dev, mac_dev->res->start,
+ mac_dev->res->end + 1 - mac_dev->res->start);
+ if (!priv->vaddr) {
+ dev_err(dev, "devm_ioremap() failed\n");
+ err = -EIO;
+ goto _return_dev_set_drvdata;
+ }
+
+ if (!of_device_is_available(mac_node)) {
+ devm_iounmap(dev, priv->vaddr);
+ __devm_release_region(dev, fman_get_mem_region(priv->fman),
+ res.start, res.end + 1 - res.start);
+ devm_kfree(dev, mac_dev);
+ dev_set_drvdata(dev, NULL);
+ return -ENODEV;
+ }
+
+ /* Get the cell-index */
+ u32_prop = of_get_property(mac_node, "cell-index", &lenp);
+ if (!u32_prop) {
+ dev_err(dev, "of_get_property(%s, cell-index) failed\n",
+ mac_node->full_name);
+ err = -EINVAL;
+ goto _return_dev_set_drvdata;
+ }
+ WARN_ON(lenp != sizeof(u32));
+ priv->cell_index = (u8)fdt32_to_cpu(u32_prop[0]);
+
+ /* Get the MAC address */
+ mac_addr = of_get_mac_address(mac_node);
+ if (!mac_addr) {
+ dev_err(dev, "of_get_mac_address(%s) failed\n",
+ mac_node->full_name);
+ err = -EINVAL;
+ goto _return_dev_set_drvdata;
+ }
+ memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
+
+ /* Get the port handles */
+ nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
+ if (unlikely(nph < 0)) {
+ dev_err(dev, "of_count_phandle_with_args(%s, fsl,fman-ports) failed\n",
+ mac_node->full_name);
+ err = nph;
+ goto _return_dev_set_drvdata;
+ }
+
+ if (nph != ARRAY_SIZE(mac_dev->port)) {
+		dev_err(dev, "Unsupported number of fsl,fman-ports handles in MAC node %s\n",
+			mac_node->full_name);
+ err = -EINVAL;
+ goto _return_dev_set_drvdata;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+ /* Find the port node */
+ dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
+ if (!dev_node) {
+ dev_err(dev, "of_parse_phandle(%s, fsl,fman-ports) failed\n",
+ mac_node->full_name);
+ err = -EINVAL;
+ goto _return_of_node_put;
+ }
+
+ of_dev = of_find_device_by_node(dev_node);
+ if (!of_dev) {
+ dev_err(dev, "of_find_device_by_node(%s) failed\n",
+ dev_node->full_name);
+ err = -EINVAL;
+ goto _return_of_node_put;
+ }
+
+ mac_dev->port[i] = fman_port_bind(&of_dev->dev);
+ if (!mac_dev->port[i]) {
+ dev_err(dev, "dev_get_drvdata(%s) failed\n",
+ dev_node->full_name);
+ err = -EINVAL;
+ goto _return_of_node_put;
+ }
+ of_node_put(dev_node);
+ }
+
+ /* Get the PHY connection type */
+ char_prop = (const char *)of_get_property(mac_node,
+ "phy-connection-type", NULL);
+ if (!char_prop) {
+ dev_warn(dev,
+ "of_get_property(%s, phy-connection-type) failed. Defaulting to MII\n",
+ mac_node->full_name);
+ priv->phy_if = PHY_INTERFACE_MODE_MII;
+ } else {
+ priv->phy_if = str2phy(char_prop);
+ }
+
+ priv->speed = phy2speed[priv->phy_if];
+ priv->max_speed = priv->speed;
+ mac_dev->if_support = DTSEC_SUPPORTED;
+ /* We don't support half-duplex in SGMII mode */
+ if (priv->phy_if == PHY_INTERFACE_MODE_SGMII)
+ mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
+ SUPPORTED_100baseT_Half);
+
+ /* Gigabit support (no half-duplex) */
+ if (priv->max_speed == 1000)
+ mac_dev->if_support |= SUPPORTED_1000baseT_Full;
+
+ /* The 10G interface only supports one mode */
+ if (priv->phy_if == PHY_INTERFACE_MODE_XGMII)
+ mac_dev->if_support = SUPPORTED_10000baseT_Full;
+
+ /* Get the rest of the PHY information */
+ priv->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
+ if (!priv->phy_node && of_phy_is_fixed_link(mac_node)) {
+ struct phy_device *phy;
+
+ err = of_phy_register_fixed_link(mac_node);
+ if (err)
+ goto _return_dev_set_drvdata;
+
+		priv->fixed_link = kzalloc(sizeof(*priv->fixed_link),
+					   GFP_KERNEL);
+		if (!priv->fixed_link) {
+			err = -ENOMEM;
+			goto _return_dev_set_drvdata;
+		}
+
+		priv->phy_node = of_node_get(mac_node);
+		phy = of_phy_find_device(priv->phy_node);
+		if (!phy) {
+			err = -ENODEV;
+			goto _return_dev_set_drvdata;
+		}
+
+ priv->fixed_link->link = phy->link;
+ priv->fixed_link->speed = phy->speed;
+ priv->fixed_link->duplex = phy->duplex;
+ priv->fixed_link->pause = phy->pause;
+ priv->fixed_link->asym_pause = phy->asym_pause;
+ }
+
+ err = mac_dev->init(mac_dev);
+ if (err < 0) {
+ dev_err(dev, "mac_dev->init() = %d\n", err);
+ of_node_put(priv->phy_node);
+ goto _return_dev_set_drvdata;
+ }
+
+ /* pause frame autonegotiation enabled */
+ mac_dev->autoneg_pause = true;
+
+	/* By initializing the active values to false, we force FMD to
+	 * enable PAUSE frames on both RX and TX
+	 */
+ mac_dev->rx_pause_req = true;
+ mac_dev->tx_pause_req = true;
+ mac_dev->rx_pause_active = false;
+ mac_dev->tx_pause_active = false;
+ err = fman_set_mac_active_pause(mac_dev, true, true);
+ if (err < 0)
+ dev_err(dev, "fman_set_mac_active_pause() = %d\n", err);
+
+ dev_info(dev, "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
+ mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
+ mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
+
+ priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev, mac_node);
+ if (IS_ERR(priv->eth_dev)) {
+ dev_err(dev, "failed to add Ethernet platform device for MAC %d\n",
+ priv->cell_index);
+ priv->eth_dev = NULL;
+ }
+
+ goto _return;
+
+_return_of_node_put:
+ of_node_put(dev_node);
+_return_dev_set_drvdata:
+ kfree(priv->fixed_link);
+ kfree(priv);
+ dev_set_drvdata(dev, NULL);
+_return:
+ return err;
+}
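+
+/* Illustrative device-tree node matching what mac_probe() parses above;
+ * the property values and labels are made up for the example:
+ *
+ *	ethernet@e0000 {
+ *		compatible = "fsl,fman-dtsec";
+ *		cell-index = <0>;
+ *		reg = <0xe0000 0x1000>;
+ *		fsl,fman-ports = <&fman1_rx8 &fman1_tx28>;
+ *		phy-handle = <&phy0>;
+ *		phy-connection-type = "rgmii";
+ *		tbi-handle = <&tbi0>;
+ *	};
+ */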
+
+static struct platform_driver mac_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = mac_match,
+ },
+ .probe = mac_probe,
+};
+
+builtin_platform_driver(mac_driver);
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
new file mode 100644
index 0000000..0211cc9
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -0,0 +1,97 @@
+/* Copyright 2008-2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MAC_H
+#define __MAC_H
+
+#include <linux/device.h>
+#include <linux/if_ether.h>
+#include <linux/phy.h>
+#include <linux/list.h>
+
+#include "fman_port.h"
+#include "fman.h"
+#include "fman_mac.h"
+
+struct fman_mac;
+struct mac_priv_s;
+
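+/* Glue layer state for one FMan MAC: shared between the MAC-specific
+ * drivers in this series (dTSEC, tGEC, mEMAC) and the DPAA Ethernet
+ * layer. The ops below are filled in by the MAC-specific setup code.
+ */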
+struct mac_device {
+ struct resource *res;
+ u8 addr[ETH_ALEN];
+ struct fman_port *port[2];
+ u32 if_support;
+ struct phy_device *phy_dev;
+
+ bool autoneg_pause;
+ bool rx_pause_req;
+ bool tx_pause_req;
+ bool rx_pause_active;
+ bool tx_pause_active;
+ bool promisc;
+
+ int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
+ int (*init)(struct mac_device *mac_dev);
+ int (*start)(struct mac_device *mac_dev);
+ int (*stop)(struct mac_device *mac_dev);
+ int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
+ int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
+ int (*set_multi)(struct net_device *net_dev,
+ struct mac_device *mac_dev);
+ int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
+ int (*set_tx_pause)(struct fman_mac *mac_dev, u8 priority,
+ u16 pause_time, u16 thresh_time);
+ int (*set_exception)(struct fman_mac *mac_dev,
+ enum fman_mac_exceptions exception, bool enable);
+ int (*add_hash_mac_addr)(struct fman_mac *mac_dev,
+ enet_addr_t *eth_addr);
+ int (*remove_hash_mac_addr)(struct fman_mac *mac_dev,
+ enet_addr_t *eth_addr);
+
+ struct fman_mac *fman_mac;
+ struct mac_priv_s *priv;
+};
+
+struct dpaa_eth_data {
+ struct device_node *mac_node;
+ struct mac_device *mac_dev;
+ int mac_hw_id;
+ int fman_hw_id;
+};
+
+extern const char *mac_driver_description;
+
+int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
+
+void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
+ bool *tx_pause);
+
+#endif /* __MAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 8c30cec..d2263c7 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -499,8 +499,7 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
struct device_node *np = rcb_common->dsaf_dev->dev->of_node;
struct platform_device *pdev =
- container_of(rcb_common->dsaf_dev->dev,
- struct platform_device, dev);
+ to_platform_device(rcb_common->dsaf_dev->dev);
bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);
for (i = 0; i < ring_num; i++) {
diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig
index 99c1ceb..37dceab 100644
--- a/drivers/net/ethernet/ibm/Kconfig
+++ b/drivers/net/ethernet/ibm/Kconfig
@@ -37,4 +37,14 @@ config EHEA
To compile the driver as a module, choose M here. The module
will be called ehea.
+config IBMVNIC
+ tristate "IBM Virtual NIC support"
+ depends on PPC_PSERIES
+ ---help---
+ This driver supports Virtual NIC adapters on IBM i and IBM System p
+ systems.
+
+ To compile this driver as a module, choose M here. The module will
+ be called ibmvnic.
+
endif # NET_VENDOR_IBM
diff --git a/drivers/net/ethernet/ibm/Makefile b/drivers/net/ethernet/ibm/Makefile
index 2f04e71..447865c 100644
--- a/drivers/net/ethernet/ibm/Makefile
+++ b/drivers/net/ethernet/ibm/Makefile
@@ -3,5 +3,6 @@
#
obj-$(CONFIG_IBMVETH) += ibmveth.o
+obj-$(CONFIG_IBMVNIC) += ibmvnic.o
obj-$(CONFIG_IBM_EMAC) += emac/
obj-$(CONFIG_EHEA) += ehea/
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 6691b5a..335417b 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -169,7 +169,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
if (!pool->free_map)
return -1;
- pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
+ pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL);
if (!pool->dma_addr) {
kfree(pool->free_map);
pool->free_map = NULL;
@@ -187,8 +187,6 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
return -1;
}
- memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
-
for (i = 0; i < pool->size; ++i)
pool->free_map[i] = i;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
new file mode 100644
index 0000000..7d657084
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -0,0 +1,3585 @@
+/**************************************************************************/
+/* */
+/* IBM System i and System p Virtual NIC Device Driver */
+/* Copyright (C) 2014 IBM Corp. */
+/* Santiago Leon (santi_leon@yahoo.com) */
+/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
+/* John Allen (jallen@linux.vnet.ibm.com) */
+/* */
+/* This program is free software; you can redistribute it and/or modify */
+/* it under the terms of the GNU General Public License as published by */
+/* the Free Software Foundation; either version 2 of the License, or */
+/* (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU General Public License for more details. */
+/* */
+/* You should have received a copy of the GNU General Public License */
+/* along with this program. */
+/* */
+/* This module contains the implementation of a virtual ethernet device */
+/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
+/* option of the RS/6000 Platform Architecture to interface with virtual */
+/* ethernet NICs that are presented to the partition by the hypervisor. */
+/* */
+/* Messages are passed between the VNIC driver and the VNIC server using */
+/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
+/* issue and receive commands that initiate communication with the server */
+/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
+/* are used by the driver to notify the server that a packet is */
+/* ready for transmission or that a buffer has been added to receive a */
+/* packet. Subsequently, sCRQs are used by the server to notify the */
+/* driver that a packet transmission has been completed or that a packet */
+/* has been received and placed in a waiting buffer. */
+/* */
+/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
+/* which skbs are DMA mapped and immediately unmapped when the transmit */
+/* or receive has been completed, the VNIC driver is required to use */
+/* "long term mapping". This means that large, contiguous DMA-mapped     */
+/* buffers are allocated on driver initialization and then continuously  */
+/* reused to pass skbs to and from the VNIC server.                      */
+/* */
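+/* A rough sketch of the bring-up sequence implemented below: the        */
+/* driver sends a CRQ init and a version exchange, queries and then      */
+/* requests capabilities, registers its sub-CRQs, and finally logs in    */
+/* with the lists of sub-CRQ handles before raising the logical link.    */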
+/**************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/completion.h>
+#include <linux/ioport.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/ethtool.h>
+#include <linux/proc_fs.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <net/net_namespace.h>
+#include <asm/hvcall.h>
+#include <linux/atomic.h>
+#include <asm/vio.h>
+#include <asm/iommu.h>
+#include <linux/uaccess.h>
+#include <asm/firmware.h>
+
+#include "ibmvnic.h"
+
+static const char ibmvnic_driver_name[] = "ibmvnic";
+static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
+
+MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
+MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
+
+static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
+static int ibmvnic_remove(struct vio_dev *);
+static void release_sub_crqs(struct ibmvnic_adapter *);
+static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
+static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
+static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
+static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
+static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
+ union sub_crq *sub_crq);
+static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
+static int enable_scrq_irq(struct ibmvnic_adapter *,
+ struct ibmvnic_sub_crq_queue *);
+static int disable_scrq_irq(struct ibmvnic_adapter *,
+ struct ibmvnic_sub_crq_queue *);
+static int pending_scrq(struct ibmvnic_adapter *,
+ struct ibmvnic_sub_crq_queue *);
+static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
+ struct ibmvnic_sub_crq_queue *);
+static int ibmvnic_poll(struct napi_struct *napi, int budget);
+static void send_map_query(struct ibmvnic_adapter *adapter);
+static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
+static void send_request_unmap(struct ibmvnic_adapter *, u8);
+
+struct ibmvnic_stat {
+ char name[ETH_GSTRING_LEN];
+ int offset;
+};
+
+#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
+ offsetof(struct ibmvnic_statistics, stat))
+#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
+
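+/* Each entry below records the byte offset of one counter within the
+ * adapter-embedded struct ibmvnic_statistics, which the firmware fills
+ * in response to REQUEST_STATISTICS; the ethtool stats path then reads
+ * the counters generically through IBMVNIC_GET_STAT().
+ */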
+static const struct ibmvnic_stat ibmvnic_stats[] = {
+ {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
+ {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
+ {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
+ {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
+ {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
+ {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
+ {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
+ {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
+ {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
+ {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
+ {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
+ {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
+ {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
+ {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
+ {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
+ {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
+ {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
+ {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
+ {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
+ {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
+ {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
+ {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
+};
+
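+/* Wrapper for the H_REG_SUB_CRQ hcall: registers a DMA region with the
+ * hypervisor as a sub-CRQ and returns the assigned queue number and
+ * interrupt source through the hcall return buffer.
+ */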
+static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
+ unsigned long length, unsigned long *number,
+ unsigned long *irq)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
+ *number = retbuf[0];
+ *irq = retbuf[1];
+
+ return rc;
+}
+
+/* net_device_ops functions */
+
+static void init_rx_pool(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_rx_pool *rx_pool, int num, int index,
+ int buff_size, int active)
+{
+ netdev_dbg(adapter->netdev,
+ "Initializing rx_pool %d, %d buffs, %d bytes each\n",
+ index, num, buff_size);
+ rx_pool->size = num;
+ rx_pool->index = index;
+ rx_pool->buff_size = buff_size;
+ rx_pool->active = active;
+}
+
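+/* Allocate one long term buffer: a coherent DMA region that is
+ * registered with the VNIC server via REQUEST_MAP and referred to by
+ * map_id from then on, so individual skbs never need to be mapped.
+ */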
+static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_long_term_buff *ltb, int size)
+{
+ struct device *dev = &adapter->vdev->dev;
+
+ ltb->size = size;
+ ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
+ GFP_KERNEL);
+
+ if (!ltb->buff) {
+ dev_err(dev, "Couldn't alloc long term buffer\n");
+ return -ENOMEM;
+ }
+ ltb->map_id = adapter->map_id;
+ adapter->map_id++;
+	/* Initialize the completion before sending the map request so the
+	 * response cannot race with it.
+	 */
+	init_completion(&adapter->fw_done);
+	send_request_map(adapter, ltb->addr,
+			 ltb->size, ltb->map_id);
+	wait_for_completion(&adapter->fw_done);
+ return 0;
+}
+
+static void free_long_term_buff(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_long_term_buff *ltb)
+{
+ struct device *dev = &adapter->vdev->dev;
+
+ dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+ send_request_unmap(adapter, ltb->map_id);
+}
+
+static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_rx_pool *pool)
+{
+ struct device *dev = &adapter->vdev->dev;
+ int i;
+
+ pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
+ if (!pool->free_map)
+ return -ENOMEM;
+
+ pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
+ GFP_KERNEL);
+
+ if (!pool->rx_buff) {
+ dev_err(dev, "Couldn't alloc rx buffers\n");
+ kfree(pool->free_map);
+ return -ENOMEM;
+ }
+
+ if (alloc_long_term_buff(adapter, &pool->long_term_buff,
+ pool->size * pool->buff_size)) {
+ kfree(pool->free_map);
+ kfree(pool->rx_buff);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < pool->size; ++i)
+ pool->free_map[i] = i;
+
+ atomic_set(&pool->available, 0);
+ pool->next_alloc = 0;
+ pool->next_free = 0;
+
+ return 0;
+}
+
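+/* Top up an rx pool: for each free slot, stage an skb in the pool's
+ * long term buffer and advertise the slot to the server with an rx_add
+ * sub-CRQ so a received frame can be placed there.
+ */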
+static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_rx_pool *pool)
+{
+ int count = pool->size - atomic_read(&pool->available);
+ struct device *dev = &adapter->vdev->dev;
+ int buffers_added = 0;
+ unsigned long lpar_rc;
+ union sub_crq sub_crq;
+ struct sk_buff *skb;
+ unsigned int offset;
+ dma_addr_t dma_addr;
+ unsigned char *dst;
+ u64 *handle_array;
+ int shift = 0;
+ int index;
+ int i;
+
+ handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->
+ off_rxadd_subcrqs));
+
+ for (i = 0; i < count; ++i) {
+ skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
+ if (!skb) {
+ dev_err(dev, "Couldn't replenish rx buff\n");
+ adapter->replenish_no_mem++;
+ break;
+ }
+
+ index = pool->free_map[pool->next_free];
+
+ if (pool->rx_buff[index].skb)
+ dev_err(dev, "Inconsistent free_map!\n");
+
+ /* Copy the skb to the long term mapped DMA buffer */
+ offset = index * pool->buff_size;
+ dst = pool->long_term_buff.buff + offset;
+ memset(dst, 0, pool->buff_size);
+ dma_addr = pool->long_term_buff.addr + offset;
+ pool->rx_buff[index].data = dst;
+
+ pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
+ pool->rx_buff[index].dma = dma_addr;
+ pool->rx_buff[index].skb = skb;
+ pool->rx_buff[index].pool_index = pool->index;
+ pool->rx_buff[index].size = pool->buff_size;
+
+ memset(&sub_crq, 0, sizeof(sub_crq));
+ sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
+ sub_crq.rx_add.correlator =
+ cpu_to_be64((u64)&pool->rx_buff[index]);
+ sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
+ sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
+
+ /* The length field of the sCRQ is defined to be 24 bits so the
+ * buffer size needs to be left shifted by a byte before it is
+ * converted to big endian to prevent the last byte from being
+ * truncated.
+ */
+#ifdef __LITTLE_ENDIAN__
+ shift = 8;
+#endif
+ sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
+
+ lpar_rc = send_subcrq(adapter, handle_array[pool->index],
+ &sub_crq);
+ if (lpar_rc != H_SUCCESS)
+ goto failure;
+
+ buffers_added++;
+ adapter->replenish_add_buff_success++;
+ pool->next_free = (pool->next_free + 1) % pool->size;
+ }
+ atomic_add(buffers_added, &pool->available);
+ return;
+
+failure:
+ dev_info(dev, "replenish pools failure\n");
+ pool->free_map[pool->next_free] = index;
+ pool->rx_buff[index].skb = NULL;
+ if (!dma_mapping_error(dev, dma_addr))
+ dma_unmap_single(dev, dma_addr, pool->buff_size,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ adapter->replenish_add_buff_failure++;
+ atomic_add(buffers_added, &pool->available);
+}
+
+static void replenish_pools(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (adapter->migrated)
+ return;
+
+ adapter->replenish_task_cycles++;
+ for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ i++) {
+ if (adapter->rx_pool[i].active)
+ replenish_rx_pool(adapter, &adapter->rx_pool[i]);
+ }
+}
+
+static void free_rx_pool(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_rx_pool *pool)
+{
+ int i;
+
+ kfree(pool->free_map);
+ pool->free_map = NULL;
+
+ if (!pool->rx_buff)
+ return;
+
+ for (i = 0; i < pool->size; i++) {
+ if (pool->rx_buff[i].skb) {
+ dev_kfree_skb_any(pool->rx_buff[i].skb);
+ pool->rx_buff[i].skb = NULL;
+ }
+ }
+ kfree(pool->rx_buff);
+ pool->rx_buff = NULL;
+}
+
+static int ibmvnic_open(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_tx_pool *tx_pool;
+ union ibmvnic_crq crq;
+ int rxadd_subcrqs;
+ u64 *size_array;
+ int tx_subcrqs;
+ int i, j;
+
+ rxadd_subcrqs =
+ be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ tx_subcrqs =
+ be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->
+ off_rxadd_buff_size));
+ adapter->map_id = 1;
+ adapter->napi = kcalloc(adapter->req_rx_queues,
+ sizeof(struct napi_struct), GFP_KERNEL);
+ if (!adapter->napi)
+ goto alloc_napi_failed;
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&adapter->napi[i]);
+ }
+ adapter->rx_pool =
+ kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);
+
+ if (!adapter->rx_pool)
+ goto rx_pool_arr_alloc_failed;
+ send_map_query(adapter);
+ for (i = 0; i < rxadd_subcrqs; i++) {
+ init_rx_pool(adapter, &adapter->rx_pool[i],
+ IBMVNIC_BUFFS_PER_POOL, i,
+ be64_to_cpu(size_array[i]), 1);
+ if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
+ dev_err(dev, "Couldn't alloc rx pool\n");
+ goto rx_pool_alloc_failed;
+ }
+ }
+ adapter->tx_pool =
+ kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
+
+ if (!adapter->tx_pool)
+ goto tx_pool_arr_alloc_failed;
+ for (i = 0; i < tx_subcrqs; i++) {
+ tx_pool = &adapter->tx_pool[i];
+ tx_pool->tx_buff =
+ kcalloc(adapter->max_tx_entries_per_subcrq,
+ sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
+ if (!tx_pool->tx_buff)
+ goto tx_pool_alloc_failed;
+
+ if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
+ adapter->max_tx_entries_per_subcrq *
+ adapter->req_mtu))
+ goto tx_ltb_alloc_failed;
+
+ tx_pool->free_map =
+ kcalloc(adapter->max_tx_entries_per_subcrq,
+ sizeof(int), GFP_KERNEL);
+ if (!tx_pool->free_map)
+ goto tx_fm_alloc_failed;
+
+ for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
+ tx_pool->free_map[j] = j;
+
+ tx_pool->consumer_index = 0;
+ tx_pool->producer_index = 0;
+ }
+ adapter->bounce_buffer_size =
+ (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
+ adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
+ GFP_KERNEL);
+ if (!adapter->bounce_buffer)
+ goto bounce_alloc_failed;
+
+ adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
+ adapter->bounce_buffer_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
+ dev_err(dev, "Couldn't map tx bounce buffer\n");
+ goto bounce_map_failed;
+ }
+ replenish_pools(adapter);
+
+ /* We're ready to receive frames, enable the sub-crq interrupts and
+ * set the logical link state to up
+ */
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ enable_scrq_irq(adapter, adapter->rx_scrq[i]);
+
+ for (i = 0; i < adapter->req_tx_queues; i++)
+ enable_scrq_irq(adapter, adapter->tx_scrq[i]);
+
+ memset(&crq, 0, sizeof(crq));
+ crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
+ crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
+ crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
+ ibmvnic_send_crq(adapter, &crq);
+
+ netif_start_queue(netdev);
+ return 0;
+
+bounce_map_failed:
+ kfree(adapter->bounce_buffer);
+bounce_alloc_failed:
+ i = tx_subcrqs - 1;
+ kfree(adapter->tx_pool[i].free_map);
+tx_fm_alloc_failed:
+ free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
+tx_ltb_alloc_failed:
+ kfree(adapter->tx_pool[i].tx_buff);
+tx_pool_alloc_failed:
+ for (j = 0; j < i; j++) {
+ kfree(adapter->tx_pool[j].tx_buff);
+ free_long_term_buff(adapter,
+ &adapter->tx_pool[j].long_term_buff);
+ kfree(adapter->tx_pool[j].free_map);
+ }
+ kfree(adapter->tx_pool);
+ adapter->tx_pool = NULL;
+tx_pool_arr_alloc_failed:
+ i = rxadd_subcrqs;
+rx_pool_alloc_failed:
+ for (j = 0; j < i; j++) {
+ free_rx_pool(adapter, &adapter->rx_pool[j]);
+ free_long_term_buff(adapter,
+ &adapter->rx_pool[j].long_term_buff);
+ }
+ kfree(adapter->rx_pool);
+ adapter->rx_pool = NULL;
+rx_pool_arr_alloc_failed:
+	for (i = 0; i < adapter->req_rx_queues; i++)
+		napi_disable(&adapter->napi[i]);
+alloc_napi_failed:
+ return -ENOMEM;
+}
+
+static int ibmvnic_close(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->vdev->dev;
+ union ibmvnic_crq crq;
+ int i;
+
+ adapter->closing = true;
+
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ napi_disable(&adapter->napi[i]);
+
+ netif_stop_queue(netdev);
+
+ if (adapter->bounce_buffer) {
+ if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
+			dma_unmap_single(&adapter->vdev->dev,
+					 adapter->bounce_buffer_dma,
+					 adapter->bounce_buffer_size,
+					 DMA_TO_DEVICE);
+ adapter->bounce_buffer_dma = DMA_ERROR_CODE;
+ }
+ kfree(adapter->bounce_buffer);
+ adapter->bounce_buffer = NULL;
+ }
+
+ memset(&crq, 0, sizeof(crq));
+ crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
+ crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
+ crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
+ ibmvnic_send_crq(adapter, &crq);
+
+ for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ i++) {
+ kfree(adapter->tx_pool[i].tx_buff);
+ free_long_term_buff(adapter,
+ &adapter->tx_pool[i].long_term_buff);
+ kfree(adapter->tx_pool[i].free_map);
+ }
+ kfree(adapter->tx_pool);
+ adapter->tx_pool = NULL;
+
+ for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ i++) {
+ free_rx_pool(adapter, &adapter->rx_pool[i]);
+ free_long_term_buff(adapter,
+ &adapter->rx_pool[i].long_term_buff);
+ }
+ kfree(adapter->rx_pool);
+ adapter->rx_pool = NULL;
+
+ adapter->closing = false;
+
+ return 0;
+}
+
+static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int queue_num = skb_get_queue_mapping(skb);
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_tx_buff *tx_buff = NULL;
+ struct ibmvnic_tx_pool *tx_pool;
+ unsigned int tx_send_failed = 0;
+ unsigned int tx_map_failed = 0;
+ unsigned int tx_dropped = 0;
+ unsigned int tx_packets = 0;
+ unsigned int tx_bytes = 0;
+ dma_addr_t data_dma_addr;
+ struct netdev_queue *txq;
+ bool used_bounce = false;
+ unsigned long lpar_rc;
+ union sub_crq tx_crq;
+ unsigned int offset;
+ unsigned char *dst;
+ u64 *handle_array;
+ int index = 0;
+ int ret = 0;
+
+ tx_pool = &adapter->tx_pool[queue_num];
+	txq = netdev_get_tx_queue(netdev, queue_num);
+ handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->
+ off_txsubm_subcrqs));
+ if (adapter->migrated) {
+ tx_send_failed++;
+ tx_dropped++;
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ index = tx_pool->free_map[tx_pool->consumer_index];
+ offset = index * adapter->req_mtu;
+ dst = tx_pool->long_term_buff.buff + offset;
+ memset(dst, 0, adapter->req_mtu);
+ skb_copy_from_linear_data(skb, dst, skb->len);
+ data_dma_addr = tx_pool->long_term_buff.addr + offset;
+
+ tx_pool->consumer_index =
+ (tx_pool->consumer_index + 1) %
+ adapter->max_tx_entries_per_subcrq;
+
+ tx_buff = &tx_pool->tx_buff[index];
+ tx_buff->skb = skb;
+ tx_buff->data_dma[0] = data_dma_addr;
+ tx_buff->data_len[0] = skb->len;
+ tx_buff->index = index;
+ tx_buff->pool_index = queue_num;
+ tx_buff->last_frag = true;
+ tx_buff->used_bounce = used_bounce;
+
+ memset(&tx_crq, 0, sizeof(tx_crq));
+ tx_crq.v1.first = IBMVNIC_CRQ_CMD;
+ tx_crq.v1.type = IBMVNIC_TX_DESC;
+ tx_crq.v1.n_crq_elem = 1;
+ tx_crq.v1.n_sge = 1;
+ tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
+ tx_crq.v1.correlator = cpu_to_be32(index);
+ tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
+ tx_crq.v1.sge_len = cpu_to_be32(skb->len);
+ tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
+
+ if (adapter->vlan_header_insertion) {
+ tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
+ tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ if (ip_hdr(skb)->version == 4)
+ tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
+ else if (ip_hdr(skb)->version == 6)
+ tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
+
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
+		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
+
+ lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq);
+
+ if (lpar_rc != H_SUCCESS) {
+ dev_err(dev, "tx failed with code %ld\n", lpar_rc);
+
+ if (tx_pool->consumer_index == 0)
+ tx_pool->consumer_index =
+ adapter->max_tx_entries_per_subcrq - 1;
+ else
+ tx_pool->consumer_index--;
+
+ tx_send_failed++;
+ tx_dropped++;
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+ tx_packets++;
+ tx_bytes += skb->len;
+ txq->trans_start = jiffies;
+ ret = NETDEV_TX_OK;
+
+out:
+ netdev->stats.tx_dropped += tx_dropped;
+ netdev->stats.tx_bytes += tx_bytes;
+ netdev->stats.tx_packets += tx_packets;
+ adapter->tx_send_failed += tx_send_failed;
+ adapter->tx_map_failed += tx_map_failed;
+
+ return ret;
+}
+
+static void ibmvnic_set_multi(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ struct netdev_hw_addr *ha;
+ union ibmvnic_crq crq;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.request_capability.first = IBMVNIC_CRQ_CMD;
+ crq.request_capability.cmd = REQUEST_CAPABILITY;
+
+ if (netdev->flags & IFF_PROMISC) {
+ if (!adapter->promisc_supported)
+ return;
+ } else {
+ if (netdev->flags & IFF_ALLMULTI) {
+ /* Accept all multicast */
+ memset(&crq, 0, sizeof(crq));
+ crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
+ crq.multicast_ctrl.cmd = MULTICAST_CTRL;
+ crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
+ ibmvnic_send_crq(adapter, &crq);
+ } else if (netdev_mc_empty(netdev)) {
+ /* Reject all multicast */
+ memset(&crq, 0, sizeof(crq));
+ crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
+ crq.multicast_ctrl.cmd = MULTICAST_CTRL;
+ crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
+ ibmvnic_send_crq(adapter, &crq);
+ } else {
+ /* Accept one or more multicast(s) */
+ netdev_for_each_mc_addr(ha, netdev) {
+ memset(&crq, 0, sizeof(crq));
+ crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
+ crq.multicast_ctrl.cmd = MULTICAST_CTRL;
+ crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
+ ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
+ ha->addr);
+ ibmvnic_send_crq(adapter, &crq);
+ }
+ }
+ }
+}
+
+static int ibmvnic_set_mac(struct net_device *netdev, void *p)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+ union ibmvnic_crq crq;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
+ crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
+ ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
+ ibmvnic_send_crq(adapter, &crq);
+ /* netdev->dev_addr is changed in handle_change_mac_rsp function */
+ return 0;
+}
+
+static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
+ if (new_mtu > adapter->req_mtu || new_mtu < adapter->min_mtu)
+ return -EINVAL;
+
+ netdev->mtu = new_mtu;
+ return 0;
+}
+
+static void ibmvnic_tx_timeout(struct net_device *dev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(dev);
+ int rc;
+
+ /* Adapter timed out, resetting it */
+ release_sub_crqs(adapter);
+ rc = ibmvnic_reset_crq(adapter);
+ if (rc)
+ dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
+ else
+ ibmvnic_send_crq_init(adapter);
+}
+
+static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_rx_buff *rx_buff)
+{
+ struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
+
+ rx_buff->skb = NULL;
+
+ pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
+ pool->next_alloc = (pool->next_alloc + 1) % pool->size;
+
+ atomic_dec(&pool->available);
+}
+
+static int ibmvnic_poll(struct napi_struct *napi, int budget)
+{
+ struct net_device *netdev = napi->dev;
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int scrq_num = (int)(napi - adapter->napi);
+ int frames_processed = 0;
+restart_poll:
+ while (frames_processed < budget) {
+ struct sk_buff *skb;
+ struct ibmvnic_rx_buff *rx_buff;
+ union sub_crq *next;
+ u32 length;
+ u16 offset;
+ u8 flags = 0;
+
+ if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
+ break;
+ next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
+ rx_buff =
+ (struct ibmvnic_rx_buff *)be64_to_cpu(next->
+ rx_comp.correlator);
+ /* do error checking */
+ if (next->rx_comp.rc) {
+ netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
+ /* free the entry */
+ next->rx_comp.first = 0;
+ remove_buff_from_pool(adapter, rx_buff);
+ break;
+ }
+
+ length = be32_to_cpu(next->rx_comp.len);
+ offset = be16_to_cpu(next->rx_comp.off_frame_data);
+ flags = next->rx_comp.flags;
+ skb = rx_buff->skb;
+ skb_copy_to_linear_data(skb, rx_buff->data + offset,
+ length);
+ skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
+ /* free the entry */
+ next->rx_comp.first = 0;
+ remove_buff_from_pool(adapter, rx_buff);
+
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
+ flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+
+ length = skb->len;
+ napi_gro_receive(napi, skb); /* send it up */
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += length;
+ frames_processed++;
+ }
+ replenish_pools(adapter);
+
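+	/* Budget not exhausted: re-arm the interrupt, complete NAPI, and
+	 * re-check for frames that raced in before the irq was re-enabled.
+	 */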
+ if (frames_processed < budget) {
+ enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
+ napi_complete(napi);
+ if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
+ napi_reschedule(napi)) {
+ disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
+ goto restart_poll;
+ }
+ }
+ return frames_processed;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ibmvnic_netpoll_controller(struct net_device *dev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(dev);
+ int i;
+
+	replenish_pools(adapter);
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
+ adapter->rx_scrq[i]);
+}
+#endif
+
+static const struct net_device_ops ibmvnic_netdev_ops = {
+ .ndo_open = ibmvnic_open,
+ .ndo_stop = ibmvnic_close,
+ .ndo_start_xmit = ibmvnic_xmit,
+ .ndo_set_rx_mode = ibmvnic_set_multi,
+ .ndo_set_mac_address = ibmvnic_set_mac,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = ibmvnic_change_mtu,
+ .ndo_tx_timeout = ibmvnic_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ibmvnic_netpoll_controller,
+#endif
+};
+
+/* ethtool functions */
+
+static int ibmvnic_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+ SUPPORTED_FIBRE);
+ cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
+ ADVERTISED_FIBRE);
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
+ cmd->duplex = DUPLEX_FULL;
+ cmd->port = PORT_FIBRE;
+ cmd->phy_address = 0;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->autoneg = AUTONEG_ENABLE;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 1;
+ return 0;
+}
+
+static void ibmvnic_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
+ strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
+}
+
+static u32 ibmvnic_get_msglevel(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = data;
+}
+
+static u32 ibmvnic_get_link(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
+ /* Don't need to send a query because we request a logical link up at
+ * init and then we wait for link state indications
+ */
+ return adapter->logical_link_state;
+}
+
+static void ibmvnic_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ ring->rx_max_pending = 0;
+ ring->tx_max_pending = 0;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = 0;
+ ring->tx_pending = 0;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
+ memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
+}
+
+static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(ibmvnic_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void ibmvnic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(dev);
+ union ibmvnic_crq crq;
+ int i;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.request_statistics.first = IBMVNIC_CRQ_CMD;
+ crq.request_statistics.cmd = REQUEST_STATISTICS;
+ crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
+ crq.request_statistics.len =
+ cpu_to_be32(sizeof(struct ibmvnic_statistics));
+
+	/* Initialize the completion before sending the request so the
+	 * response cannot race with it, then wait for the statistics
+	 * buffer to be written back.
+	 */
+	init_completion(&adapter->stats_done);
+	ibmvnic_send_crq(adapter, &crq);
+	wait_for_completion(&adapter->stats_done);
+
+ for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
+ data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
+}
+
+static const struct ethtool_ops ibmvnic_ethtool_ops = {
+ .get_settings = ibmvnic_get_settings,
+ .get_drvinfo = ibmvnic_get_drvinfo,
+ .get_msglevel = ibmvnic_get_msglevel,
+ .set_msglevel = ibmvnic_set_msglevel,
+ .get_link = ibmvnic_get_link,
+ .get_ringparam = ibmvnic_get_ringparam,
+ .get_strings = ibmvnic_get_strings,
+ .get_sset_count = ibmvnic_get_sset_count,
+ .get_ethtool_stats = ibmvnic_get_ethtool_stats,
+};
+
+/* Routines for managing CRQs/sCRQs */
+
+static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_sub_crq_queue *scrq)
+{
+ struct device *dev = &adapter->vdev->dev;
+ long rc;
+
+ netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
+
+ /* Close the sub-crqs */
+ do {
+ rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
+ adapter->vdev->unit_address,
+ scrq->crq_num);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ free_pages((unsigned long)scrq->msgs, 2);
+ kfree(scrq);
+}
+
+static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
+ *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_sub_crq_queue *scrq;
+ int rc;
+
+ scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
+ if (!scrq)
+ return NULL;
+
+	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL |
+						       __GFP_ZERO, 2);
+	if (!scrq->msgs) {
+		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
+		goto zero_page_failed;
+	}
+
+ scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, scrq->msg_token)) {
+ dev_warn(dev, "Couldn't map crq queue messages page\n");
+ goto map_failed;
+ }
+
+ rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
+ 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
+
+ if (rc == H_RESOURCE)
+ rc = ibmvnic_reset_crq(adapter);
+
+ if (rc == H_CLOSED) {
+ dev_warn(dev, "Partner adapter not ready, waiting.\n");
+ } else if (rc) {
+ dev_warn(dev, "Error %d registering sub-crq\n", rc);
+ goto reg_failed;
+ }
+
+ scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+ if (scrq->irq == NO_IRQ) {
+ dev_err(dev, "Error mapping irq\n");
+ goto map_irq_failed;
+ }
+
+ scrq->adapter = adapter;
+ scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
+ scrq->cur = 0;
+ scrq->rx_skb_top = NULL;
+ spin_lock_init(&scrq->lock);
+
+ netdev_dbg(adapter->netdev,
+ "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
+ scrq->crq_num, scrq->hw_irq, scrq->irq);
+
+ return scrq;
+
+map_irq_failed:
+ do {
+ rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
+ adapter->vdev->unit_address,
+ scrq->crq_num);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+reg_failed:
+ dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+map_failed:
+ free_pages((unsigned long)scrq->msgs, 2);
+zero_page_failed:
+ kfree(scrq);
+
+ return NULL;
+}
+
+static void release_sub_crqs(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (adapter->tx_scrq) {
+ for (i = 0; i < adapter->req_tx_queues; i++)
+ if (adapter->tx_scrq[i]) {
+ free_irq(adapter->tx_scrq[i]->irq,
+ adapter->tx_scrq[i]);
+ release_sub_crq_queue(adapter,
+ adapter->tx_scrq[i]);
+ }
+ adapter->tx_scrq = NULL;
+ }
+
+ if (adapter->rx_scrq) {
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ if (adapter->rx_scrq[i]) {
+ free_irq(adapter->rx_scrq[i]->irq,
+ adapter->rx_scrq[i]);
+ release_sub_crq_queue(adapter,
+ adapter->rx_scrq[i]);
+ }
+ adapter->rx_scrq = NULL;
+ }
+
+ adapter->requested_caps = 0;
+}
+
+static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_sub_crq_queue *scrq)
+{
+ struct device *dev = &adapter->vdev->dev;
+ unsigned long rc;
+
+ rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
+ H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
+ if (rc)
+ dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
+ scrq->hw_irq, rc);
+ return rc;
+}
+
+static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_sub_crq_queue *scrq)
+{
+ struct device *dev = &adapter->vdev->dev;
+ unsigned long rc;
+
+ if (scrq->hw_irq > 0x100000000ULL) {
+ dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
+ return 1;
+ }
+
+ rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
+ H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
+ if (rc)
+ dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
+ scrq->hw_irq, rc);
+ return rc;
+}
+
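+/* Drain tx completions from a sub-CRQ: free completed skbs and return
+ * their descriptor slots to the pool's free map, re-checking after the
+ * irq is re-enabled to close the race with late completions.
+ */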
+static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_sub_crq_queue *scrq)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_tx_buff *txbuff;
+ union sub_crq *next;
+ int index;
+ int i, j;
+
+restart_loop:
+ while (pending_scrq(adapter, scrq)) {
+ unsigned int pool = scrq->pool_index;
+
+ next = ibmvnic_next_scrq(adapter, scrq);
+ for (i = 0; i < next->tx_comp.num_comps; i++) {
+ if (next->tx_comp.rcs[i]) {
+ dev_err(dev, "tx error %x\n",
+ next->tx_comp.rcs[i]);
+ continue;
+ }
+ index = be32_to_cpu(next->tx_comp.correlators[i]);
+ txbuff = &adapter->tx_pool[pool].tx_buff[index];
+
+ for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
+ if (!txbuff->data_dma[j])
+ continue;
+
+ txbuff->data_dma[j] = 0;
+ txbuff->used_bounce = false;
+ }
+
+ if (txbuff->last_frag)
+ dev_kfree_skb_any(txbuff->skb);
+
+ adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
+ producer_index] = index;
+ adapter->tx_pool[pool].producer_index =
+ (adapter->tx_pool[pool].producer_index + 1) %
+ adapter->max_tx_entries_per_subcrq;
+ }
+		/* mark the tx_comp entry as consumed */
+ next->tx_comp.first = 0;
+ }
+
+ enable_scrq_irq(adapter, scrq);
+
+ if (pending_scrq(adapter, scrq)) {
+ disable_scrq_irq(adapter, scrq);
+ goto restart_loop;
+ }
+
+ return 0;
+}
+
+static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
+{
+ struct ibmvnic_sub_crq_queue *scrq = instance;
+ struct ibmvnic_adapter *adapter = scrq->adapter;
+
+ disable_scrq_irq(adapter, scrq);
+ ibmvnic_complete_tx(adapter, scrq);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
+{
+ struct ibmvnic_sub_crq_queue *scrq = instance;
+ struct ibmvnic_adapter *adapter = scrq->adapter;
+
+ if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
+ disable_scrq_irq(adapter, scrq);
+ __napi_schedule(&adapter->napi[scrq->scrq_num]);
+ }
+
+ return IRQ_HANDLED;
+}
+
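+/* Allocate and register all requested tx and rx sub-CRQs, reducing the
+ * requested queue counts toward the negotiated minimums if fewer queues
+ * could be registered, then commit the final numbers to the server with
+ * a series of REQUEST_CAPABILITY CRQs.
+ */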
+static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_sub_crq_queue **allqueues;
+ int registered_queues = 0;
+ union ibmvnic_crq crq;
+ int total_queues;
+ int more = 0;
+ int i, j;
+ int rc;
+
+ if (!retry) {
+		/* Sub-CRQ entries are 32 bytes long */
+ int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
+
+ if (adapter->min_tx_entries_per_subcrq > entries_page ||
+ adapter->min_rx_add_entries_per_subcrq > entries_page) {
+ dev_err(dev, "Fatal, invalid entries per sub-crq\n");
+ goto allqueues_failed;
+ }
+
+ /* Get the minimum between the queried max and the entries
+ * that fit in our PAGE_SIZE
+ */
+ adapter->req_tx_entries_per_subcrq =
+ adapter->max_tx_entries_per_subcrq > entries_page ?
+ entries_page : adapter->max_tx_entries_per_subcrq;
+ adapter->req_rx_add_entries_per_subcrq =
+ adapter->max_rx_add_entries_per_subcrq > entries_page ?
+ entries_page : adapter->max_rx_add_entries_per_subcrq;
+
+		/* Request the minimum number of queues supported by firmware */
+ adapter->req_tx_queues = adapter->min_tx_queues;
+ adapter->req_rx_queues = adapter->min_rx_queues;
+ adapter->req_rx_add_queues = adapter->min_rx_add_queues;
+
+ adapter->req_mtu = adapter->max_mtu;
+ }
+
+ total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
+
+ allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
+ if (!allqueues)
+ goto allqueues_failed;
+
+ for (i = 0; i < total_queues; i++) {
+ allqueues[i] = init_sub_crq_queue(adapter);
+ if (!allqueues[i]) {
+ dev_warn(dev, "Couldn't allocate all sub-crqs\n");
+ break;
+ }
+ registered_queues++;
+ }
+
+ /* Make sure we were able to register the minimum number of queues */
+ if (registered_queues <
+ adapter->min_tx_queues + adapter->min_rx_queues) {
+ dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
+ goto tx_failed;
+ }
+
+	/* Distribute any shortfall in registered queues between rx and tx */
+	for (i = 0; i < total_queues - registered_queues + more; i++) {
+ netdev_dbg(adapter->netdev, "Reducing number of queues\n");
+ switch (i % 3) {
+ case 0:
+ if (adapter->req_rx_queues > adapter->min_rx_queues)
+ adapter->req_rx_queues--;
+ else
+ more++;
+ break;
+ case 1:
+ if (adapter->req_tx_queues > adapter->min_tx_queues)
+ adapter->req_tx_queues--;
+ else
+ more++;
+ break;
+ }
+ }
+
+ adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
+ sizeof(*adapter->tx_scrq), GFP_ATOMIC);
+ if (!adapter->tx_scrq)
+ goto tx_failed;
+
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ adapter->tx_scrq[i] = allqueues[i];
+ adapter->tx_scrq[i]->pool_index = i;
+ rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
+ 0, "ibmvnic_tx", adapter->tx_scrq[i]);
+ if (rc) {
+ dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
+ adapter->tx_scrq[i]->irq, rc);
+ goto req_tx_irq_failed;
+ }
+ }
+
+ adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
+ sizeof(*adapter->rx_scrq), GFP_ATOMIC);
+ if (!adapter->rx_scrq)
+ goto rx_failed;
+
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
+ adapter->rx_scrq[i]->scrq_num = i;
+ rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
+ 0, "ibmvnic_rx", adapter->rx_scrq[i]);
+ if (rc) {
+ dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
+ adapter->rx_scrq[i]->irq, rc);
+ goto req_rx_irq_failed;
+ }
+ }
+
+ memset(&crq, 0, sizeof(crq));
+ crq.request_capability.first = IBMVNIC_CRQ_CMD;
+ crq.request_capability.cmd = REQUEST_CAPABILITY;
+
+ crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
+ crq.request_capability.number = cpu_to_be32(adapter->req_tx_queues);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
+ crq.request_capability.number = cpu_to_be32(adapter->req_rx_queues);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
+ crq.request_capability.number = cpu_to_be32(adapter->req_rx_add_queues);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.request_capability.capability =
+ cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
+ crq.request_capability.number =
+ cpu_to_be32(adapter->req_tx_entries_per_subcrq);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.request_capability.capability =
+ cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
+ crq.request_capability.number =
+ cpu_to_be32(adapter->req_rx_add_entries_per_subcrq);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.request_capability.capability = cpu_to_be16(REQ_MTU);
+ crq.request_capability.number = cpu_to_be32(adapter->req_mtu);
+ ibmvnic_send_crq(adapter, &crq);
+
+ if (adapter->netdev->flags & IFF_PROMISC) {
+ if (adapter->promisc_supported) {
+ crq.request_capability.capability =
+ cpu_to_be16(PROMISC_REQUESTED);
+ crq.request_capability.number = cpu_to_be32(1);
+ ibmvnic_send_crq(adapter, &crq);
+ }
+ } else {
+ crq.request_capability.capability =
+ cpu_to_be16(PROMISC_REQUESTED);
+ crq.request_capability.number = cpu_to_be32(0);
+ ibmvnic_send_crq(adapter, &crq);
+ }
+
+ kfree(allqueues);
+
+ return;
+
+req_rx_irq_failed:
+ for (j = 0; j < i; j++)
+ free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
+ i = adapter->req_tx_queues;
+req_tx_irq_failed:
+ for (j = 0; j < i; j++)
+ free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
+ kfree(adapter->rx_scrq);
+ adapter->rx_scrq = NULL;
+rx_failed:
+ kfree(adapter->tx_scrq);
+ adapter->tx_scrq = NULL;
+tx_failed:
+ for (i = 0; i < registered_queues; i++)
+ release_sub_crq_queue(adapter, allqueues[i]);
+ kfree(allqueues);
+allqueues_failed:
+ ibmvnic_remove(adapter->vdev);
+}
+
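+/* A sub-CRQ entry is ready to consume once the server has set the
+ * response bit in its first byte; while closing we also report pending
+ * so that pollers drain and exit.
+ */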
+static int pending_scrq(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_sub_crq_queue *scrq)
+{
+ union sub_crq *entry = &scrq->msgs[scrq->cur];
+
+ if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
+ return 1;
+ else
+ return 0;
+}
+
+static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_sub_crq_queue *scrq)
+{
+ union sub_crq *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&scrq->lock, flags);
+ entry = &scrq->msgs[scrq->cur];
+ if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
+ if (++scrq->cur == scrq->size)
+ scrq->cur = 0;
+ } else {
+ entry = NULL;
+ }
+ spin_unlock_irqrestore(&scrq->lock, flags);
+
+ return entry;
+}
+
+static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_crq_queue *queue = &adapter->crq;
+ union ibmvnic_crq *crq;
+
+ crq = &queue->msgs[queue->cur];
+ if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
+ if (++queue->cur == queue->size)
+ queue->cur = 0;
+ } else {
+ crq = NULL;
+ }
+
+ return crq;
+}
+
+static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
+ union sub_crq *sub_crq)
+{
+ unsigned int ua = adapter->vdev->unit_address;
+ struct device *dev = &adapter->vdev->dev;
+ u64 *u64_crq = (u64 *)sub_crq;
+ int rc;
+
+ netdev_dbg(adapter->netdev,
+ "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
+ (unsigned long int)cpu_to_be64(remote_handle),
+ (unsigned long int)cpu_to_be64(u64_crq[0]),
+ (unsigned long int)cpu_to_be64(u64_crq[1]),
+ (unsigned long int)cpu_to_be64(u64_crq[2]),
+ (unsigned long int)cpu_to_be64(u64_crq[3]));
+
+ /* Make sure the hypervisor sees the complete request */
+ mb();
+
+ rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
+ cpu_to_be64(remote_handle),
+ cpu_to_be64(u64_crq[0]),
+ cpu_to_be64(u64_crq[1]),
+ cpu_to_be64(u64_crq[2]),
+ cpu_to_be64(u64_crq[3]));
+
+ if (rc) {
+ if (rc == H_CLOSED)
+ dev_warn(dev, "CRQ Queue closed\n");
+ dev_err(dev, "Send error (rc=%d)\n", rc);
+ }
+
+ return rc;
+}
+
+static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
+ union ibmvnic_crq *crq)
+{
+ unsigned int ua = adapter->vdev->unit_address;
+ struct device *dev = &adapter->vdev->dev;
+ u64 *u64_crq = (u64 *)crq;
+ int rc;
+
+ netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
+ (unsigned long int)cpu_to_be64(u64_crq[0]),
+ (unsigned long int)cpu_to_be64(u64_crq[1]));
+
+ /* Make sure the hypervisor sees the complete request */
+ mb();
+
+ rc = plpar_hcall_norets(H_SEND_CRQ, ua,
+ cpu_to_be64(u64_crq[0]),
+ cpu_to_be64(u64_crq[1]));
+
+ if (rc) {
+ if (rc == H_CLOSED)
+ dev_warn(dev, "CRQ Queue closed\n");
+ dev_warn(dev, "Send error (rc=%d)\n", rc);
+ }
+
+ return rc;
+}
+
+static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
+{
+ union ibmvnic_crq crq;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
+ crq.generic.cmd = IBMVNIC_CRQ_INIT;
+ netdev_dbg(adapter->netdev, "Sending CRQ init\n");
+
+ return ibmvnic_send_crq(adapter, &crq);
+}
+
+static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
+{
+ union ibmvnic_crq crq;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
+ crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
+ netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");
+
+ return ibmvnic_send_crq(adapter, &crq);
+}
+
+static int send_version_xchg(struct ibmvnic_adapter *adapter)
+{
+ union ibmvnic_crq crq;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.version_exchange.first = IBMVNIC_CRQ_CMD;
+ crq.version_exchange.cmd = VERSION_EXCHANGE;
+ crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
+
+ return ibmvnic_send_crq(adapter, &crq);
+}
+
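+/* Build and send the login request: the buffer carries the counts and
+ * sub-CRQ handles for tx and rx along with the DMA address of a response
+ * buffer for the server to fill in; the CRQ is tracked on the adapter's
+ * inflight list.
+ */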
+static void send_login(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
+ struct ibmvnic_login_buffer *login_buffer;
+ struct ibmvnic_inflight_cmd *inflight_cmd;
+ struct device *dev = &adapter->vdev->dev;
+ dma_addr_t rsp_buffer_token;
+ dma_addr_t buffer_token;
+ size_t rsp_buffer_size;
+ union ibmvnic_crq crq;
+ unsigned long flags;
+ size_t buffer_size;
+ __be64 *tx_list_p;
+ __be64 *rx_list_p;
+ int i;
+
+ buffer_size =
+ sizeof(struct ibmvnic_login_buffer) +
+ sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);
+
+ login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
+ if (!login_buffer)
+ goto buf_alloc_failed;
+
+ buffer_token = dma_map_single(dev, login_buffer, buffer_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, buffer_token)) {
+ dev_err(dev, "Couldn't map login buffer\n");
+ goto buf_map_failed;
+ }
+
+	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
+			  sizeof(u64) * (adapter->req_tx_queues +
+					 adapter->req_rx_queues *
+					 adapter->req_rx_add_queues +
+					 adapter->req_rx_add_queues) +
+			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
+
+ login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
+ if (!login_rsp_buffer)
+ goto buf_rsp_alloc_failed;
+
+ rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
+ rsp_buffer_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, rsp_buffer_token)) {
+ dev_err(dev, "Couldn't map login rsp buffer\n");
+ goto buf_rsp_map_failed;
+ }
+ inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
+ if (!inflight_cmd) {
+ dev_err(dev, "Couldn't allocate inflight_cmd\n");
+ goto inflight_alloc_failed;
+ }
+ adapter->login_buf = login_buffer;
+ adapter->login_buf_token = buffer_token;
+ adapter->login_buf_sz = buffer_size;
+ adapter->login_rsp_buf = login_rsp_buffer;
+ adapter->login_rsp_buf_token = rsp_buffer_token;
+ adapter->login_rsp_buf_sz = rsp_buffer_size;
+
+ login_buffer->len = cpu_to_be32(buffer_size);
+ login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
+ login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
+ login_buffer->off_txcomp_subcrqs =
+ cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
+ login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
+ login_buffer->off_rxcomp_subcrqs =
+ cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
+ sizeof(u64) * adapter->req_tx_queues);
+ login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
+ login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
+
+ tx_list_p = (__be64 *)((char *)login_buffer +
+ sizeof(struct ibmvnic_login_buffer));
+ rx_list_p = (__be64 *)((char *)login_buffer +
+ sizeof(struct ibmvnic_login_buffer) +
+ sizeof(u64) * adapter->req_tx_queues);
+
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ if (adapter->tx_scrq[i]) {
+ tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
+ crq_num);
+ }
+ }
+
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ if (adapter->rx_scrq[i]) {
+ rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
+ crq_num);
+ }
+ }
+
+ netdev_dbg(adapter->netdev, "Login Buffer:\n");
+ for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
+ netdev_dbg(adapter->netdev, "%016lx\n",
+ ((unsigned long int *)(adapter->login_buf))[i]);
+ }
+
+ memset(&crq, 0, sizeof(crq));
+ crq.login.first = IBMVNIC_CRQ_CMD;
+ crq.login.cmd = LOGIN;
+ crq.login.ioba = cpu_to_be32(buffer_token);
+ crq.login.len = cpu_to_be32(buffer_size);
+
+ memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
+
+ spin_lock_irqsave(&adapter->inflight_lock, flags);
+ list_add_tail(&inflight_cmd->list, &adapter->inflight);
+ spin_unlock_irqrestore(&adapter->inflight_lock, flags);
+
+ ibmvnic_send_crq(adapter, &crq);
+
+ return;
+
+inflight_alloc_failed:
+ dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
+ DMA_FROM_DEVICE);
+buf_rsp_map_failed:
+ kfree(login_rsp_buffer);
+buf_rsp_alloc_failed:
+ dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
+buf_map_failed:
+ kfree(login_buffer);
+buf_alloc_failed:
+ return;
+}
+
+static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
+ u32 len, u8 map_id)
+{
+ union ibmvnic_crq crq;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.request_map.first = IBMVNIC_CRQ_CMD;
+ crq.request_map.cmd = REQUEST_MAP;
+ crq.request_map.map_id = map_id;
+ crq.request_map.ioba = cpu_to_be32(addr);
+ crq.request_map.len = cpu_to_be32(len);
+ ibmvnic_send_crq(adapter, &crq);
+}
+
+static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
+{
+ union ibmvnic_crq crq;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.request_unmap.first = IBMVNIC_CRQ_CMD;
+ crq.request_unmap.cmd = REQUEST_UNMAP;
+ crq.request_unmap.map_id = map_id;
+ ibmvnic_send_crq(adapter, &crq);
+}
+
+static void send_map_query(struct ibmvnic_adapter *adapter)
+{
+ union ibmvnic_crq crq;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.query_map.first = IBMVNIC_CRQ_CMD;
+ crq.query_map.cmd = QUERY_MAP;
+ ibmvnic_send_crq(adapter, &crq);
+}
+
+/* Send a series of CRQs requesting various capabilities of the VNIC server */
+static void send_cap_queries(struct ibmvnic_adapter *adapter)
+{
+ union ibmvnic_crq crq;
+
+ atomic_set(&adapter->running_cap_queries, 0);
+ memset(&crq, 0, sizeof(crq));
+ crq.query_capability.first = IBMVNIC_CRQ_CMD;
+ crq.query_capability.cmd = QUERY_CAPABILITY;
+
+ crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability =
+ cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability =
+ cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability =
+ cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability =
+ cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability = cpu_to_be16(MIN_MTU);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability = cpu_to_be16(MAX_MTU);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability =
+ cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability =
+ cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability =
+ cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
+ atomic_inc(&adapter->running_cap_queries);
+ ibmvnic_send_crq(adapter, &crq);
+}
+
+static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
+ union ibmvnic_crq crq;
+ int i;
+
+ dma_unmap_single(dev, adapter->ip_offload_tok,
+ sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
+
+ netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
+ for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
+ netdev_dbg(adapter->netdev, "%016lx\n",
+ ((unsigned long int *)(buf))[i]);
+
+ netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
+ netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
+ netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
+ buf->tcp_ipv4_chksum);
+ netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
+ buf->tcp_ipv6_chksum);
+ netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
+ buf->udp_ipv4_chksum);
+ netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
+ buf->udp_ipv6_chksum);
+ netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
+ buf->large_tx_ipv4);
+ netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
+ buf->large_tx_ipv6);
+ netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
+ buf->large_rx_ipv4);
+ netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
+ buf->large_rx_ipv6);
+ netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
+ buf->max_ipv4_header_size);
+ netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
+ buf->max_ipv6_header_size);
+ netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
+ buf->max_tcp_header_size);
+ netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
+ buf->max_udp_header_size);
+ netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
+ buf->max_large_tx_size);
+ netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
+ buf->max_large_rx_size);
+ netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
+ buf->ipv6_extension_header);
+ netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
+ buf->tcp_pseudosum_req);
+ netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
+ buf->num_ipv6_ext_headers);
+ netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
+ buf->off_ipv6_ext_headers);
+
+ adapter->ip_offload_ctrl_tok =
+ dma_map_single(dev, &adapter->ip_offload_ctrl,
+ sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
+ dev_err(dev, "Couldn't map ip offload control buffer\n");
+ return;
+ }
+
+ adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
+ adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
+ adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
+ adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
+ adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
+
+ /* large_tx/rx disabled for now, additional features needed */
+ adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
+ adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
+ adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
+ adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
+
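+	/* Advertise checksum offload only for the protocol families the
+	 * server reported support for in the query response.
+	 */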
+ adapter->netdev->features = NETIF_F_GSO;
+
+ if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
+ adapter->netdev->features |= NETIF_F_IP_CSUM;
+
+ if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
+ adapter->netdev->features |= NETIF_F_IPV6_CSUM;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
+ crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
+ crq.control_ip_offload.len =
+ cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
+ crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
+ ibmvnic_send_crq(adapter, &crq);
+}
+
+static void handle_error_info_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_error_buff *error_buff;
+ unsigned long flags;
+ bool found = false;
+ int i;
+
+	if (crq->request_error_rsp.rc.code) {
+		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
+			 crq->request_error_rsp.rc.code);
+		return;
+	}
+
+ spin_lock_irqsave(&adapter->error_list_lock, flags);
+ list_for_each_entry(error_buff, &adapter->errors, list)
+ if (error_buff->error_id == crq->request_error_rsp.error_id) {
+ found = true;
+ list_del(&error_buff->list);
+ break;
+ }
+ spin_unlock_irqrestore(&adapter->error_list_lock, flags);
+
+ if (!found) {
+ dev_err(dev, "Couldn't find error id %x\n",
+ crq->request_error_rsp.error_id);
+ return;
+ }
+
+ dev_err(dev, "Detailed info for error id %x:",
+ crq->request_error_rsp.error_id);
+
+ for (i = 0; i < error_buff->len; i++) {
+ pr_cont("%02x", (int)error_buff->buff[i]);
+ if (i % 8 == 7)
+ pr_cont(" ");
+ }
+ pr_cont("\n");
+
+ dma_unmap_single(dev, error_buff->dma, error_buff->len,
+ DMA_FROM_DEVICE);
+ kfree(error_buff->buff);
+ kfree(error_buff);
+}
+
+static void handle_dump_size_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ int len = be32_to_cpu(crq->request_dump_size_rsp.len);
+ struct ibmvnic_inflight_cmd *inflight_cmd;
+ struct device *dev = &adapter->vdev->dev;
+ union ibmvnic_crq newcrq;
+ unsigned long flags;
+
+ /* allocate and map buffer */
+ adapter->dump_data = kmalloc(len, GFP_KERNEL);
+ if (!adapter->dump_data) {
+ complete(&adapter->fw_done);
+ return;
+ }
+
+ adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(dev, adapter->dump_data_token)) {
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ dev_err(dev, "Couldn't map dump data\n");
+ kfree(adapter->dump_data);
+ complete(&adapter->fw_done);
+ return;
+ }
+
+ inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
+ if (!inflight_cmd) {
+ dma_unmap_single(dev, adapter->dump_data_token, len,
+ DMA_FROM_DEVICE);
+ kfree(adapter->dump_data);
+ complete(&adapter->fw_done);
+ return;
+ }
+
+ memset(&newcrq, 0, sizeof(newcrq));
+ newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
+ newcrq.request_dump.cmd = REQUEST_DUMP;
+ newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
+ newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);
+
+ memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));
+
+ spin_lock_irqsave(&adapter->inflight_lock, flags);
+ list_add_tail(&inflight_cmd->list, &adapter->inflight);
+ spin_unlock_irqrestore(&adapter->inflight_lock, flags);
+
+ ibmvnic_send_crq(adapter, &newcrq);
+}
+
+static void handle_error_indication(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
+ struct ibmvnic_inflight_cmd *inflight_cmd;
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_error_buff *error_buff;
+ union ibmvnic_crq new_crq;
+ unsigned long flags;
+
+	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
+		crq->error_indication.flags & IBMVNIC_FATAL_ERROR ?
+		"FATAL " : "",
+		crq->error_indication.error_id,
+		crq->error_indication.error_cause);
+
+ error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
+ if (!error_buff)
+ return;
+
+ error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
+ if (!error_buff->buff) {
+ kfree(error_buff);
+ return;
+ }
+
+ error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, error_buff->dma)) {
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ dev_err(dev, "Couldn't map error buffer\n");
+ kfree(error_buff->buff);
+ kfree(error_buff);
+ return;
+ }
+
+ inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
+ if (!inflight_cmd) {
+ dma_unmap_single(dev, error_buff->dma, detail_len,
+ DMA_FROM_DEVICE);
+ kfree(error_buff->buff);
+ kfree(error_buff);
+ return;
+ }
+
+ error_buff->len = detail_len;
+ error_buff->error_id = crq->error_indication.error_id;
+
+ spin_lock_irqsave(&adapter->error_list_lock, flags);
+ list_add_tail(&error_buff->list, &adapter->errors);
+ spin_unlock_irqrestore(&adapter->error_list_lock, flags);
+
+ memset(&new_crq, 0, sizeof(new_crq));
+ new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
+ new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
+ new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
+ new_crq.request_error_info.len = cpu_to_be32(detail_len);
+ new_crq.request_error_info.error_id = crq->error_indication.error_id;
+
+	memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));
+
+ spin_lock_irqsave(&adapter->inflight_lock, flags);
+ list_add_tail(&inflight_cmd->list, &adapter->inflight);
+ spin_unlock_irqrestore(&adapter->inflight_lock, flags);
+
+ ibmvnic_send_crq(adapter, &new_crq);
+}
+
+static void handle_change_mac_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct device *dev = &adapter->vdev->dev;
+ long rc;
+
+ rc = crq->change_mac_addr_rsp.rc.code;
+ if (rc) {
+ dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
+ return;
+ }
+ memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
+ ETH_ALEN);
+}
+
+static void handle_request_cap_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ u64 *req_value;
+ char *name;
+
+ switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
+ case REQ_TX_QUEUES:
+ req_value = &adapter->req_tx_queues;
+ name = "tx";
+ break;
+ case REQ_RX_QUEUES:
+ req_value = &adapter->req_rx_queues;
+ name = "rx";
+ break;
+ case REQ_RX_ADD_QUEUES:
+ req_value = &adapter->req_rx_add_queues;
+ name = "rx_add";
+ break;
+ case REQ_TX_ENTRIES_PER_SUBCRQ:
+ req_value = &adapter->req_tx_entries_per_subcrq;
+ name = "tx_entries_per_subcrq";
+ break;
+ case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
+ req_value = &adapter->req_rx_add_entries_per_subcrq;
+ name = "rx_add_entries_per_subcrq";
+ break;
+ case REQ_MTU:
+ req_value = &adapter->req_mtu;
+ name = "mtu";
+ break;
+ case PROMISC_REQUESTED:
+ req_value = &adapter->promisc;
+ name = "promisc";
+ break;
+ default:
+ dev_err(dev, "Got invalid cap request rsp %d\n",
+ crq->request_capability.capability);
+ return;
+ }
+
+ switch (crq->request_capability_rsp.rc.code) {
+ case SUCCESS:
+ break;
+ case PARTIALSUCCESS:
+		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
+			 *req_value,
+			 (long)be32_to_cpu(crq->request_capability_rsp.number),
+			 name);
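+		/* The server countered with a different value; tear down
+		 * the sub-CRQs and retry the request with the server's
+		 * number.
+		 */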
+ release_sub_crqs(adapter);
+ *req_value = be32_to_cpu(crq->request_capability_rsp.number);
+ complete(&adapter->init_done);
+ return;
+ default:
+ dev_err(dev, "Error %d in request cap rsp\n",
+ crq->request_capability_rsp.rc.code);
+ return;
+ }
+
+	/* All seven REQ_* capabilities above have been acknowledged; now
+	 * query IP offload support.
+	 */
+	if (++adapter->requested_caps == 7) {
+ union ibmvnic_crq newcrq;
+ int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
+ struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
+ &adapter->ip_offload_buf;
+
+ adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
+ buf_sz,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ dev_err(dev, "Couldn't map offload buffer\n");
+ return;
+ }
+
+ memset(&newcrq, 0, sizeof(newcrq));
+ newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
+ newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
+ newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
+ newcrq.query_ip_offload.ioba =
+ cpu_to_be32(adapter->ip_offload_tok);
+
+ ibmvnic_send_crq(adapter, &newcrq);
+ }
+}
+
+static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
+ struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
+ struct ibmvnic_login_buffer *login = adapter->login_buf;
+ union ibmvnic_crq crq;
+ int i;
+
+ dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
+ DMA_BIDIRECTIONAL);
+ dma_unmap_single(dev, adapter->login_rsp_buf_token,
+ adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
+
+ netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
+ for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
+ netdev_dbg(adapter->netdev, "%016lx\n",
+ ((unsigned long int *)(adapter->login_rsp_buf))[i]);
+ }
+
+ /* Sanity checks */
+ if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
+ (be32_to_cpu(login->num_rxcomp_subcrqs) *
+ adapter->req_rx_add_queues !=
+ be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
+ dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
+ ibmvnic_remove(adapter->vdev);
+ return -EIO;
+ }
+ complete(&adapter->init_done);
+
+ memset(&crq, 0, sizeof(crq));
+ crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
+ crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
+ ibmvnic_send_crq(adapter, &crq);
+
+ return 0;
+}
+
+static void handle_request_map_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ u8 map_id = crq->request_map_rsp.map_id;
+ int tx_subcrqs;
+ int rx_subcrqs;
+ long rc;
+ int i;
+
+ tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+
+ rc = crq->request_map_rsp.rc.code;
+ if (rc) {
+ dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
+ adapter->map_id--;
+ /* need to find and zero tx/rx_pool map_id */
+ for (i = 0; i < tx_subcrqs; i++) {
+ if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
+ adapter->tx_pool[i].long_term_buff.map_id = 0;
+ }
+ for (i = 0; i < rx_subcrqs; i++) {
+ if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
+ adapter->rx_pool[i].long_term_buff.map_id = 0;
+ }
+ }
+ complete(&adapter->fw_done);
+}
+
+static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ long rc;
+
+ rc = crq->request_unmap_rsp.rc.code;
+ if (rc)
+ dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
+}
+
+static void handle_query_map_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct device *dev = &adapter->vdev->dev;
+ long rc;
+
+ rc = crq->query_map_rsp.rc.code;
+ if (rc) {
+ dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
+ return;
+ }
+ netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
+ crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
+ crq->query_map_rsp.free_pages);
+}
+
+static void handle_query_cap_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct device *dev = &adapter->vdev->dev;
+ long rc;
+
+ atomic_dec(&adapter->running_cap_queries);
+ netdev_dbg(netdev, "Outstanding queries: %d\n",
+ atomic_read(&adapter->running_cap_queries));
+ rc = crq->query_capability.rc.code;
+ if (rc) {
+ dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
+ goto out;
+ }
+
+ switch (be16_to_cpu(crq->query_capability.capability)) {
+ case MIN_TX_QUEUES:
+ adapter->min_tx_queues =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "min_tx_queues = %lld\n",
+ adapter->min_tx_queues);
+ break;
+ case MIN_RX_QUEUES:
+ adapter->min_rx_queues =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "min_rx_queues = %lld\n",
+ adapter->min_rx_queues);
+ break;
+ case MIN_RX_ADD_QUEUES:
+ adapter->min_rx_add_queues =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
+ adapter->min_rx_add_queues);
+ break;
+ case MAX_TX_QUEUES:
+ adapter->max_tx_queues =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "max_tx_queues = %lld\n",
+ adapter->max_tx_queues);
+ break;
+ case MAX_RX_QUEUES:
+ adapter->max_rx_queues =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "max_rx_queues = %lld\n",
+ adapter->max_rx_queues);
+ break;
+ case MAX_RX_ADD_QUEUES:
+ adapter->max_rx_add_queues =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
+ adapter->max_rx_add_queues);
+ break;
+ case MIN_TX_ENTRIES_PER_SUBCRQ:
+ adapter->min_tx_entries_per_subcrq =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
+ adapter->min_tx_entries_per_subcrq);
+ break;
+ case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
+ adapter->min_rx_add_entries_per_subcrq =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
+ adapter->min_rx_add_entries_per_subcrq);
+ break;
+ case MAX_TX_ENTRIES_PER_SUBCRQ:
+ adapter->max_tx_entries_per_subcrq =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
+ adapter->max_tx_entries_per_subcrq);
+ break;
+ case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
+ adapter->max_rx_add_entries_per_subcrq =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
+ adapter->max_rx_add_entries_per_subcrq);
+ break;
+ case TCP_IP_OFFLOAD:
+ adapter->tcp_ip_offload =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
+ adapter->tcp_ip_offload);
+ break;
+ case PROMISC_SUPPORTED:
+ adapter->promisc_supported =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "promisc_supported = %lld\n",
+ adapter->promisc_supported);
+ break;
+ case MIN_MTU:
+ adapter->min_mtu = be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
+ break;
+ case MAX_MTU:
+ adapter->max_mtu = be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
+ break;
+ case MAX_MULTICAST_FILTERS:
+ adapter->max_multicast_filters =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "max_multicast_filters = %lld\n",
+ adapter->max_multicast_filters);
+ break;
+ case VLAN_HEADER_INSERTION:
+ adapter->vlan_header_insertion =
+ be32_to_cpu(crq->query_capability.number);
+ if (adapter->vlan_header_insertion)
+ netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
+ netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
+ adapter->vlan_header_insertion);
+ break;
+ case MAX_TX_SG_ENTRIES:
+ adapter->max_tx_sg_entries =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
+ adapter->max_tx_sg_entries);
+ break;
+ case RX_SG_SUPPORTED:
+ adapter->rx_sg_supported =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "rx_sg_supported = %lld\n",
+ adapter->rx_sg_supported);
+ break;
+ case OPT_TX_COMP_SUB_QUEUES:
+ adapter->opt_tx_comp_sub_queues =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
+ adapter->opt_tx_comp_sub_queues);
+ break;
+ case OPT_RX_COMP_QUEUES:
+ adapter->opt_rx_comp_queues =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
+ adapter->opt_rx_comp_queues);
+ break;
+ case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
+ adapter->opt_rx_bufadd_q_per_rx_comp_q =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
+ adapter->opt_rx_bufadd_q_per_rx_comp_q);
+ break;
+ case OPT_TX_ENTRIES_PER_SUBCRQ:
+ adapter->opt_tx_entries_per_subcrq =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
+ adapter->opt_tx_entries_per_subcrq);
+ break;
+ case OPT_RXBA_ENTRIES_PER_SUBCRQ:
+ adapter->opt_rxba_entries_per_subcrq =
+ be32_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
+ adapter->opt_rxba_entries_per_subcrq);
+ break;
+ case TX_RX_DESC_REQ:
+ adapter->tx_rx_desc_req = crq->query_capability.number;
+ netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
+ adapter->tx_rx_desc_req);
+ break;
+
+ default:
+ netdev_err(netdev, "Got invalid cap rsp %d\n",
+ crq->query_capability.capability);
+ }
+
+out:
+ if (atomic_read(&adapter->running_cap_queries) == 0)
+ complete(&adapter->init_done);
+ /* We're done querying the capabilities, initialize sub-crqs */
+}
+
+static void handle_control_ras_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ u8 correlator = crq->control_ras_rsp.correlator;
+ struct device *dev = &adapter->vdev->dev;
+ bool found = false;
+ int i;
+
+ if (crq->control_ras_rsp.rc.code) {
+ dev_warn(dev, "Control ras failed rc=%d\n",
+ crq->control_ras_rsp.rc.code);
+ return;
+ }
+
+ for (i = 0; i < adapter->ras_comp_num; i++) {
+ if (adapter->ras_comps[i].correlator == correlator) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_warn(dev, "Correlator not found on control_ras_rsp\n");
+ return;
+ }
+
+ switch (crq->control_ras_rsp.op) {
+ case IBMVNIC_TRACE_LEVEL:
+ adapter->ras_comps[i].trace_level = crq->control_ras.level;
+ break;
+ case IBMVNIC_ERROR_LEVEL:
+ adapter->ras_comps[i].error_check_level =
+ crq->control_ras.level;
+ break;
+ case IBMVNIC_TRACE_PAUSE:
+ adapter->ras_comp_int[i].paused = 1;
+ break;
+ case IBMVNIC_TRACE_RESUME:
+ adapter->ras_comp_int[i].paused = 0;
+ break;
+ case IBMVNIC_TRACE_ON:
+ adapter->ras_comps[i].trace_on = 1;
+ break;
+ case IBMVNIC_TRACE_OFF:
+ adapter->ras_comps[i].trace_on = 0;
+ break;
+ case IBMVNIC_CHG_TRACE_BUFF_SZ:
+ /* trace_buff_sz is 3 bytes, stuff it into an int */
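+		/* The three big-endian bytes from the response become the
+		 * low-order bytes of the __be32 trace_buff_size field.
+		 */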
+ ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
+ ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
+ crq->control_ras_rsp.trace_buff_sz[0];
+ ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
+ crq->control_ras_rsp.trace_buff_sz[1];
+ ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
+ crq->control_ras_rsp.trace_buff_sz[2];
+ break;
+ default:
+ dev_err(dev, "invalid op %d on control_ras_rsp",
+ crq->control_ras_rsp.op);
+ }
+}
+
+static int ibmvnic_fw_comp_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
+ loff_t *ppos)
+{
+ struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
+ struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_fw_trace_entry *trace;
+ int num = ras_comp_int->num;
+ union ibmvnic_crq crq;
+ dma_addr_t trace_tok;
+
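+	/* The firmware writes the trace into a DMA buffer we supply; the
+	 * COLLECT_FW_TRACE_RSP handler completes fw_done once it is filled.
+	 */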
+ if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
+ return 0;
+
+	trace = dma_alloc_coherent(dev,
+				   be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
+				   &trace_tok, GFP_KERNEL);
+ if (!trace) {
+ dev_err(dev, "Couldn't alloc trace buffer\n");
+ return 0;
+ }
+
+ memset(&crq, 0, sizeof(crq));
+ crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
+ crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
+ crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
+ crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
+ crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
+	init_completion(&adapter->fw_done);
+	ibmvnic_send_crq(adapter, &crq);
+
+	wait_for_completion(&adapter->fw_done);
+
+	if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
+		len = be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
+		      *ppos;
+
+	if (copy_to_user(user_buf, &((u8 *)trace)[*ppos], len)) {
+		dma_free_coherent(dev,
+				  be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
+				  trace, trace_tok);
+		return -EFAULT;
+	}
+
+	dma_free_coherent(dev,
+			  be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
+			  trace, trace_tok);
+	*ppos += len;
+	return len;
+}
+
+static const struct file_operations trace_ops = {
+ .owner = THIS_MODULE,
+ .open = ibmvnic_fw_comp_open,
+ .read = trace_read,
+};
+
+static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
+ loff_t *ppos)
+{
+ struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
+ struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
+ int num = ras_comp_int->num;
+ char buff[5]; /* 1 or 0 plus \n and \0 */
+ int size;
+
+ size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);
+
+ if (*ppos >= size)
+ return 0;
+
+	if (copy_to_user(user_buf, buff, size))
+		return -EFAULT;
+ *ppos += size;
+ return size;
+}
+
+static ssize_t paused_write(struct file *file, const char __user *user_buf,
+ size_t len, loff_t *ppos)
+{
+ struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
+ struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
+ int num = ras_comp_int->num;
+ union ibmvnic_crq crq;
+ unsigned long val;
+ char buff[9]; /* decimal max int plus \n and \0 */
+
+	if (len > sizeof(buff) - 1)
+		return -EINVAL;
+	if (copy_from_user(buff, user_buf, len))
+		return -EFAULT;
+	buff[len] = '\0';
+	if (kstrtoul(buff, 10, &val))
+		return -EINVAL;
+
+ adapter->ras_comp_int[num].paused = val ? 1 : 0;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.control_ras.first = IBMVNIC_CRQ_CMD;
+ crq.control_ras.cmd = CONTROL_RAS;
+ crq.control_ras.correlator = adapter->ras_comps[num].correlator;
+ crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
+ ibmvnic_send_crq(adapter, &crq);
+
+ return len;
+}
+
+static const struct file_operations paused_ops = {
+ .owner = THIS_MODULE,
+ .open = ibmvnic_fw_comp_open,
+ .read = paused_read,
+ .write = paused_write,
+};
+
+static ssize_t tracing_read(struct file *file, char __user *user_buf,
+ size_t len, loff_t *ppos)
+{
+ struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
+ struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
+ int num = ras_comp_int->num;
+ char buff[5]; /* 1 or 0 plus \n and \0 */
+ int size;
+
+ size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);
+
+ if (*ppos >= size)
+ return 0;
+
+	if (copy_to_user(user_buf, buff, size))
+		return -EFAULT;
+ *ppos += size;
+ return size;
+}
+
+static ssize_t tracing_write(struct file *file, const char __user *user_buf,
+ size_t len, loff_t *ppos)
+{
+ struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
+ struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
+ int num = ras_comp_int->num;
+ union ibmvnic_crq crq;
+ unsigned long val;
+ char buff[9]; /* decimal max int plus \n and \0 */
+
+	if (len > sizeof(buff) - 1)
+		return -EINVAL;
+	if (copy_from_user(buff, user_buf, len))
+		return -EFAULT;
+	buff[len] = '\0';
+	if (kstrtoul(buff, 10, &val))
+		return -EINVAL;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.control_ras.first = IBMVNIC_CRQ_CMD;
+ crq.control_ras.cmd = CONTROL_RAS;
+ crq.control_ras.correlator = adapter->ras_comps[num].correlator;
+	crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
+	ibmvnic_send_crq(adapter, &crq);
+
+	return len;
+}
+
+static const struct file_operations tracing_ops = {
+ .owner = THIS_MODULE,
+ .open = ibmvnic_fw_comp_open,
+ .read = tracing_read,
+ .write = tracing_write,
+};
+
+static ssize_t error_level_read(struct file *file, char __user *user_buf,
+ size_t len, loff_t *ppos)
+{
+ struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
+ struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
+ int num = ras_comp_int->num;
+ char buff[5]; /* decimal max char plus \n and \0 */
+ int size;
+
+ size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);
+
+ if (*ppos >= size)
+ return 0;
+
+	if (copy_to_user(user_buf, buff, size))
+		return -EFAULT;
+ *ppos += size;
+ return size;
+}
+
+static ssize_t error_level_write(struct file *file, const char __user *user_buf,
+ size_t len, loff_t *ppos)
+{
+ struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
+ struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
+ int num = ras_comp_int->num;
+ union ibmvnic_crq crq;
+ unsigned long val;
+ char buff[9]; /* decimal max int plus \n and \0 */
+
+	if (len > sizeof(buff) - 1)
+		return -EINVAL;
+	if (copy_from_user(buff, user_buf, len))
+		return -EFAULT;
+	buff[len] = '\0';
+	if (kstrtoul(buff, 10, &val))
+		return -EINVAL;
+
+ if (val > 9)
+ val = 9;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.control_ras.first = IBMVNIC_CRQ_CMD;
+ crq.control_ras.cmd = CONTROL_RAS;
+ crq.control_ras.correlator = adapter->ras_comps[num].correlator;
+ crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
+ crq.control_ras.level = val;
+ ibmvnic_send_crq(adapter, &crq);
+
+ return len;
+}
+
+static const struct file_operations error_level_ops = {
+ .owner = THIS_MODULE,
+ .open = ibmvnic_fw_comp_open,
+ .read = error_level_read,
+ .write = error_level_write,
+};
+
+static ssize_t trace_level_read(struct file *file, char __user *user_buf,
+ size_t len, loff_t *ppos)
+{
+ struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
+ struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
+ int num = ras_comp_int->num;
+ char buff[5]; /* decimal max char plus \n and \0 */
+ int size;
+
+ size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);
+ if (*ppos >= size)
+ return 0;
+
+	if (copy_to_user(user_buf, buff, size))
+		return -EFAULT;
+ *ppos += size;
+ return size;
+}
+
+static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
+ size_t len, loff_t *ppos)
+{
+ struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
+ struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
+ union ibmvnic_crq crq;
+ unsigned long val;
+ char buff[9]; /* decimal max int plus \n and \0 */
+
+	if (len > sizeof(buff) - 1)
+		return -EINVAL;
+	if (copy_from_user(buff, user_buf, len))
+		return -EFAULT;
+	buff[len] = '\0';
+	if (kstrtoul(buff, 10, &val))
+		return -EINVAL;
+ if (val > 9)
+ val = 9;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.control_ras.first = IBMVNIC_CRQ_CMD;
+ crq.control_ras.cmd = CONTROL_RAS;
+ crq.control_ras.correlator =
+ adapter->ras_comps[ras_comp_int->num].correlator;
+ crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
+ crq.control_ras.level = val;
+ ibmvnic_send_crq(adapter, &crq);
+
+ return len;
+}
+
+static const struct file_operations trace_level_ops = {
+ .owner = THIS_MODULE,
+ .open = ibmvnic_fw_comp_open,
+ .read = trace_level_read,
+ .write = trace_level_write,
+};
+
+static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
+ size_t len, loff_t *ppos)
+{
+ struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
+ struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
+ int num = ras_comp_int->num;
+	char buff[13]; /* decimal max u32 plus \n and \0 */
+	int size;
+
+	size = sprintf(buff, "%u\n",
+		       be32_to_cpu(adapter->ras_comps[num].trace_buff_size));
+ if (*ppos >= size)
+ return 0;
+
+	if (copy_to_user(user_buf, buff, size))
+		return -EFAULT;
+ *ppos += size;
+ return size;
+}
+
+static ssize_t trace_buff_size_write(struct file *file,
+ const char __user *user_buf, size_t len,
+ loff_t *ppos)
+{
+ struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
+ struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
+ union ibmvnic_crq crq;
+ unsigned long val;
+ char buff[9]; /* decimal max int plus \n and \0 */
+
+	if (len > sizeof(buff) - 1)
+		return -EINVAL;
+	if (copy_from_user(buff, user_buf, len))
+		return -EFAULT;
+	buff[len] = '\0';
+	if (kstrtoul(buff, 10, &val))
+		return -EINVAL;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.control_ras.first = IBMVNIC_CRQ_CMD;
+ crq.control_ras.cmd = CONTROL_RAS;
+ crq.control_ras.correlator =
+ adapter->ras_comps[ras_comp_int->num].correlator;
+ crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
+ /* trace_buff_sz is 3 bytes, stuff an int into it */
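+	/* (assumes a 64-bit big-endian unsigned long, as on ppc64, where
+	 * bytes 5..7 are the low-order three bytes of val)
+	 */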
+ crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
+ crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
+ crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
+ ibmvnic_send_crq(adapter, &crq);
+
+ return len;
+}
+
+static const struct file_operations trace_size_ops = {
+ .owner = THIS_MODULE,
+ .open = ibmvnic_fw_comp_open,
+ .read = trace_buff_size_read,
+ .write = trace_buff_size_write,
+};
+
+static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct dentry *dir_ent;
+ struct dentry *ent;
+ int i;
+
+ debugfs_remove_recursive(adapter->ras_comps_ent);
+
+ adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
+ adapter->debugfs_dir);
+ if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
+ dev_info(dev, "debugfs create ras_comps dir failed\n");
+ return;
+ }
+
+ for (i = 0; i < adapter->ras_comp_num; i++) {
+ dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
+ adapter->ras_comps_ent);
+ if (!dir_ent || IS_ERR(dir_ent)) {
+ dev_info(dev, "debugfs create %s dir failed\n",
+ adapter->ras_comps[i].name);
+ continue;
+ }
+
+ adapter->ras_comp_int[i].adapter = adapter;
+ adapter->ras_comp_int[i].num = i;
+ adapter->ras_comp_int[i].desc_blob.data =
+ &adapter->ras_comps[i].description;
+ adapter->ras_comp_int[i].desc_blob.size =
+ sizeof(adapter->ras_comps[i].description);
+
+ /* Don't need to remember the dentry's because the debugfs dir
+ * gets removed recursively
+ */
+ ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
+ &adapter->ras_comp_int[i].desc_blob);
+ ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
+ dir_ent, &adapter->ras_comp_int[i],
+ &trace_size_ops);
+ ent = debugfs_create_file("trace_level",
+ S_IRUGO |
+ (adapter->ras_comps[i].trace_level !=
+ 0xFF ? S_IWUSR : 0),
+ dir_ent, &adapter->ras_comp_int[i],
+ &trace_level_ops);
+		ent = debugfs_create_file("error_level",
+					  S_IRUGO |
+					  (adapter->ras_comps[i].error_check_level !=
+					   0xFF ? S_IWUSR : 0),
+					  dir_ent, &adapter->ras_comp_int[i],
+					  &error_level_ops);
+ ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
+ dir_ent, &adapter->ras_comp_int[i],
+ &tracing_ops);
+ ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
+ dir_ent, &adapter->ras_comp_int[i],
+ &paused_ops);
+ ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
+ &adapter->ras_comp_int[i],
+ &trace_ops);
+ }
+}
+
+static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
+ struct device *dev = &adapter->vdev->dev;
+ union ibmvnic_crq newcrq;
+
+ adapter->ras_comps = dma_alloc_coherent(dev, len,
+ &adapter->ras_comps_tok,
+ GFP_KERNEL);
+ if (!adapter->ras_comps) {
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ dev_err(dev, "Couldn't alloc fw comps buffer\n");
+ return;
+ }
+
+	adapter->ras_comp_int = kmalloc_array(adapter->ras_comp_num,
+					      sizeof(struct ibmvnic_fw_comp_internal),
+					      GFP_KERNEL);
+	if (!adapter->ras_comp_int) {
+		dma_free_coherent(dev, len, adapter->ras_comps,
+				  adapter->ras_comps_tok);
+		return;
+	}
+
+ memset(&newcrq, 0, sizeof(newcrq));
+ newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
+ newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
+ newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
+ newcrq.request_ras_comps.len = cpu_to_be32(len);
+ ibmvnic_send_crq(adapter, &newcrq);
+}
+
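+/* Called when the connection to the server is lost (e.g. on partition
+ * migration): walk the commands that were sent but never answered and
+ * release the resources their response handlers would have freed.
+ */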
+static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_inflight_cmd *inflight_cmd;
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_error_buff *error_buff;
+ unsigned long flags;
+ unsigned long flags2;
+
+ spin_lock_irqsave(&adapter->inflight_lock, flags);
+ list_for_each_entry(inflight_cmd, &adapter->inflight, list) {
+ switch (inflight_cmd->crq.generic.cmd) {
+ case LOGIN:
+ dma_unmap_single(dev, adapter->login_buf_token,
+ adapter->login_buf_sz,
+ DMA_BIDIRECTIONAL);
+ dma_unmap_single(dev, adapter->login_rsp_buf_token,
+ adapter->login_rsp_buf_sz,
+ DMA_BIDIRECTIONAL);
+ kfree(adapter->login_rsp_buf);
+ kfree(adapter->login_buf);
+ break;
+ case REQUEST_DUMP:
+ complete(&adapter->fw_done);
+ break;
+ case REQUEST_ERROR_INFO:
+ spin_lock_irqsave(&adapter->error_list_lock, flags2);
+ list_for_each_entry(error_buff, &adapter->errors,
+ list) {
+ dma_unmap_single(dev, error_buff->dma,
+ error_buff->len,
+ DMA_FROM_DEVICE);
+ kfree(error_buff->buff);
+ list_del(&error_buff->list);
+ kfree(error_buff);
+ }
+ spin_unlock_irqrestore(&adapter->error_list_lock,
+ flags2);
+ break;
+ }
+ list_del(&inflight_cmd->list);
+ kfree(inflight_cmd);
+ }
+ spin_unlock_irqrestore(&adapter->inflight_lock, flags);
+}
+
+static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_generic_crq *gen_crq = &crq->generic;
+ struct net_device *netdev = adapter->netdev;
+ struct device *dev = &adapter->vdev->dev;
+ long rc;
+
+ netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
+ ((unsigned long int *)crq)[0],
+ ((unsigned long int *)crq)[1]);
+ switch (gen_crq->first) {
+ case IBMVNIC_CRQ_INIT_RSP:
+ switch (gen_crq->cmd) {
+ case IBMVNIC_CRQ_INIT:
+ dev_info(dev, "Partner initialized\n");
+ /* Send back a response */
+ rc = ibmvnic_send_crq_init_complete(adapter);
+ if (rc == 0)
+ send_version_xchg(adapter);
+ else
+ dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
+ break;
+ case IBMVNIC_CRQ_INIT_COMPLETE:
+ dev_info(dev, "Partner initialization complete\n");
+ send_version_xchg(adapter);
+ break;
+ default:
+ dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
+ }
+ return;
+ case IBMVNIC_CRQ_XPORT_EVENT:
+ if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
+ dev_info(dev, "Re-enabling adapter\n");
+ adapter->migrated = true;
+ ibmvnic_free_inflight(adapter);
+ release_sub_crqs(adapter);
+ rc = ibmvnic_reenable_crq_queue(adapter);
+ if (rc)
+ dev_err(dev, "Error after enable rc=%ld\n", rc);
+ adapter->migrated = false;
+ rc = ibmvnic_send_crq_init(adapter);
+ if (rc)
+ dev_err(dev, "Error sending init rc=%ld\n", rc);
+ } else {
+ /* The adapter lost the connection */
+ dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
+ gen_crq->cmd);
+ ibmvnic_free_inflight(adapter);
+ release_sub_crqs(adapter);
+ }
+ return;
+ case IBMVNIC_CRQ_CMD_RSP:
+ break;
+ default:
+ dev_err(dev, "Got an invalid msg type 0x%02x\n",
+ gen_crq->first);
+ return;
+ }
+
+ switch (gen_crq->cmd) {
+ case VERSION_EXCHANGE_RSP:
+ rc = crq->version_exchange_rsp.rc.code;
+ if (rc) {
+ dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
+ break;
+ }
+ dev_info(dev, "Partner protocol version is %d\n",
+ crq->version_exchange_rsp.version);
+ if (be16_to_cpu(crq->version_exchange_rsp.version) <
+ ibmvnic_version)
+ ibmvnic_version =
+ be16_to_cpu(crq->version_exchange_rsp.version);
+ send_cap_queries(adapter);
+ break;
+ case QUERY_CAPABILITY_RSP:
+ handle_query_cap_rsp(crq, adapter);
+ break;
+ case QUERY_MAP_RSP:
+ handle_query_map_rsp(crq, adapter);
+ break;
+ case REQUEST_MAP_RSP:
+ handle_request_map_rsp(crq, adapter);
+ break;
+ case REQUEST_UNMAP_RSP:
+ handle_request_unmap_rsp(crq, adapter);
+ break;
+ case REQUEST_CAPABILITY_RSP:
+ handle_request_cap_rsp(crq, adapter);
+ break;
+ case LOGIN_RSP:
+ netdev_dbg(netdev, "Got Login Response\n");
+ handle_login_rsp(crq, adapter);
+ break;
+ case LOGICAL_LINK_STATE_RSP:
+ netdev_dbg(netdev, "Got Logical Link State Response\n");
+ adapter->logical_link_state =
+ crq->logical_link_state_rsp.link_state;
+ break;
+ case LINK_STATE_INDICATION:
+ netdev_dbg(netdev, "Got Logical Link State Indication\n");
+ adapter->phys_link_state =
+ crq->link_state_indication.phys_link_state;
+ adapter->logical_link_state =
+ crq->link_state_indication.logical_link_state;
+ break;
+ case CHANGE_MAC_ADDR_RSP:
+ netdev_dbg(netdev, "Got MAC address change Response\n");
+ handle_change_mac_rsp(crq, adapter);
+ break;
+ case ERROR_INDICATION:
+ netdev_dbg(netdev, "Got Error Indication\n");
+ handle_error_indication(crq, adapter);
+ break;
+ case REQUEST_ERROR_RSP:
+ netdev_dbg(netdev, "Got Error Detail Response\n");
+ handle_error_info_rsp(crq, adapter);
+ break;
+ case REQUEST_STATISTICS_RSP:
+ netdev_dbg(netdev, "Got Statistics Response\n");
+ complete(&adapter->stats_done);
+ break;
+ case REQUEST_DUMP_SIZE_RSP:
+ netdev_dbg(netdev, "Got Request Dump Size Response\n");
+ handle_dump_size_rsp(crq, adapter);
+ break;
+ case REQUEST_DUMP_RSP:
+ netdev_dbg(netdev, "Got Request Dump Response\n");
+ complete(&adapter->fw_done);
+ break;
+ case QUERY_IP_OFFLOAD_RSP:
+ netdev_dbg(netdev, "Got Query IP offload Response\n");
+ handle_query_ip_offload_rsp(adapter);
+ break;
+ case MULTICAST_CTRL_RSP:
+ netdev_dbg(netdev, "Got multicast control Response\n");
+ break;
+ case CONTROL_IP_OFFLOAD_RSP:
+ netdev_dbg(netdev, "Got Control IP offload Response\n");
+ dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
+ sizeof(adapter->ip_offload_ctrl),
+ DMA_TO_DEVICE);
+ /* We're done with the queries, perform the login */
+ send_login(adapter);
+ break;
+ case REQUEST_RAS_COMP_NUM_RSP:
+ netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
+ if (crq->request_ras_comp_num_rsp.rc.code == 10) {
+ netdev_dbg(netdev, "Request RAS Comp Num not supported\n");
+ break;
+ }
+ adapter->ras_comp_num =
+ be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
+ handle_request_ras_comp_num_rsp(crq, adapter);
+ break;
+ case REQUEST_RAS_COMPS_RSP:
+ netdev_dbg(netdev, "Got Request RAS Comps Response\n");
+ handle_request_ras_comps_rsp(crq, adapter);
+ break;
+ case CONTROL_RAS_RSP:
+ netdev_dbg(netdev, "Got Control RAS Response\n");
+ handle_control_ras_rsp(crq, adapter);
+ break;
+ case COLLECT_FW_TRACE_RSP:
+ netdev_dbg(netdev, "Got Collect firmware trace Response\n");
+ complete(&adapter->fw_done);
+ break;
+ default:
+ netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
+ gen_crq->cmd);
+ }
+}
+
+static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
+{
+ struct ibmvnic_adapter *adapter = instance;
+ struct ibmvnic_crq_queue *queue = &adapter->crq;
+ struct vio_dev *vdev = adapter->vdev;
+ union ibmvnic_crq *crq;
+ unsigned long flags;
+ bool done = false;
+
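+	/* Drain the CRQ with interrupts disabled, then re-enable and check
+	 * once more to close the window where a message arrives between the
+	 * last ibmvnic_next_crq() and vio_enable_interrupts().
+	 */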
+ spin_lock_irqsave(&queue->lock, flags);
+ vio_disable_interrupts(vdev);
+ while (!done) {
+ /* Pull all the valid messages off the CRQ */
+ while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
+ ibmvnic_handle_crq(crq, adapter);
+ crq->generic.first = 0;
+ }
+ vio_enable_interrupts(vdev);
+ crq = ibmvnic_next_crq(adapter);
+ if (crq) {
+ vio_disable_interrupts(vdev);
+ ibmvnic_handle_crq(crq, adapter);
+ crq->generic.first = 0;
+ } else {
+ done = true;
+ }
+ }
+ spin_unlock_irqrestore(&queue->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
+{
+ struct vio_dev *vdev = adapter->vdev;
+ int rc;
+
+ do {
+ rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
+ } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ if (rc)
+ dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
+
+ return rc;
+}
+
+static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_crq_queue *crq = &adapter->crq;
+ struct device *dev = &adapter->vdev->dev;
+ struct vio_dev *vdev = adapter->vdev;
+ int rc;
+
+ /* Close the CRQ */
+ do {
+ rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ /* Clean out the queue */
+ memset(crq->msgs, 0, PAGE_SIZE);
+ crq->cur = 0;
+
+ /* And re-open it again */
+ rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
+ crq->msg_token, PAGE_SIZE);
+
+ if (rc == H_CLOSED)
+ /* Adapter is good, but other end is not ready */
+ dev_warn(dev, "Partner adapter not ready\n");
+ else if (rc != 0)
+ dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
+
+ return rc;
+}
+
+static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_crq_queue *crq = &adapter->crq;
+ struct vio_dev *vdev = adapter->vdev;
+ long rc;
+
+ netdev_dbg(adapter->netdev, "Releasing CRQ\n");
+ free_irq(vdev->irq, adapter);
+ do {
+ rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ free_page((unsigned long)crq->msgs);
+}
+
+static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_crq_queue *crq = &adapter->crq;
+ struct device *dev = &adapter->vdev->dev;
+ struct vio_dev *vdev = adapter->vdev;
+ int rc, retrc = -ENOMEM;
+
+ crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
+ /* Should we allocate more than one page? */
+
+ if (!crq->msgs)
+ return -ENOMEM;
+
+ crq->size = PAGE_SIZE / sizeof(*crq->msgs);
+ crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, crq->msg_token))
+ goto map_failed;
+
+ rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
+ crq->msg_token, PAGE_SIZE);
+
+ if (rc == H_RESOURCE)
+ /* maybe kexecing and resource is busy. try a reset */
+ rc = ibmvnic_reset_crq(adapter);
+ retrc = rc;
+
+ if (rc == H_CLOSED) {
+ dev_warn(dev, "Partner adapter not ready\n");
+ } else if (rc) {
+ dev_warn(dev, "Error %d opening adapter\n", rc);
+ goto reg_crq_failed;
+ }
+
+ retrc = 0;
+
+ netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
+	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
+			 adapter);
+	if (rc) {
+		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
+			vdev->irq, rc);
+		retrc = rc;
+		goto req_irq_failed;
+	}
+
+	rc = vio_enable_interrupts(vdev);
+	if (rc) {
+		dev_err(dev, "Error %d enabling interrupts\n", rc);
+		retrc = rc;
+		goto req_irq_failed;
+	}
+
+ crq->cur = 0;
+ spin_lock_init(&crq->lock);
+
+ return retrc;
+
+req_irq_failed:
+ do {
+ rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+reg_crq_failed:
+ dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
+map_failed:
+ free_page((unsigned long)crq->msgs);
+ return retrc;
+}
+
+/* debugfs for dump */
+static int ibmvnic_dump_show(struct seq_file *seq, void *v)
+{
+ struct net_device *netdev = seq->private;
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->vdev->dev;
+ union ibmvnic_crq crq;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
+ crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
+	init_completion(&adapter->fw_done);
+	ibmvnic_send_crq(adapter, &crq);
+
+	wait_for_completion(&adapter->fw_done);
+
+ seq_write(seq, adapter->dump_data, adapter->dump_data_size);
+
+ dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
+ DMA_BIDIRECTIONAL);
+
+ kfree(adapter->dump_data);
+
+ return 0;
+}
+
+static int ibmvnic_dump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ibmvnic_dump_show, inode->i_private);
+}
+
+static const struct file_operations ibmvnic_dump_ops = {
+ .owner = THIS_MODULE,
+ .open = ibmvnic_dump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
+{
+ struct ibmvnic_adapter *adapter;
+ struct net_device *netdev;
+ unsigned char *mac_addr_p;
+ struct dentry *ent;
+ char buf[16]; /* debugfs name buf */
+ int rc;
+
+ dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
+ dev->unit_address);
+
+ mac_addr_p = (unsigned char *)vio_get_attribute(dev,
+ VETH_MAC_ADDR, NULL);
+ if (!mac_addr_p) {
+ dev_err(&dev->dev,
+ "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
+ __FILE__, __LINE__);
+		return -EINVAL;
+ }
+
+ netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
+ IBMVNIC_MAX_TX_QUEUES);
+ if (!netdev)
+ return -ENOMEM;
+
+ adapter = netdev_priv(netdev);
+ dev_set_drvdata(&dev->dev, netdev);
+ adapter->vdev = dev;
+ adapter->netdev = netdev;
+
+ ether_addr_copy(adapter->mac_addr, mac_addr_p);
+ ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
+ netdev->irq = dev->irq;
+ netdev->netdev_ops = &ibmvnic_netdev_ops;
+ netdev->ethtool_ops = &ibmvnic_ethtool_ops;
+ SET_NETDEV_DEV(netdev, &dev->dev);
+
+ spin_lock_init(&adapter->stats_lock);
+
+ rc = ibmvnic_init_crq_queue(adapter);
+ if (rc) {
+ dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
+ goto free_netdev;
+ }
+
+ INIT_LIST_HEAD(&adapter->errors);
+ INIT_LIST_HEAD(&adapter->inflight);
+ spin_lock_init(&adapter->error_list_lock);
+ spin_lock_init(&adapter->inflight_lock);
+
+ adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
+ sizeof(struct ibmvnic_statistics),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ dev_err(&dev->dev, "Couldn't map stats buffer\n");
+ goto free_crq;
+ }
+
+ snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
+ ent = debugfs_create_dir(buf, NULL);
+ if (!ent || IS_ERR(ent)) {
+ dev_info(&dev->dev, "debugfs create directory failed\n");
+ adapter->debugfs_dir = NULL;
+ } else {
+ adapter->debugfs_dir = ent;
+ ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir,
+ netdev, &ibmvnic_dump_ops);
+ if (!ent || IS_ERR(ent)) {
+ dev_info(&dev->dev,
+ "debugfs create dump file failed\n");
+ adapter->debugfs_dump = NULL;
+ } else {
+ adapter->debugfs_dump = ent;
+ }
+ }
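+	/* Kick off the init handshake; init_done is first completed once
+	 * the capability exchange finishes (see handle_query_cap_rsp).
+	 */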
+	init_completion(&adapter->init_done);
+	ibmvnic_send_crq_init(adapter);
+	wait_for_completion(&adapter->init_done);
+
+ /* needed to pull init_sub_crqs outside of an interrupt context
+ * because it creates IRQ mappings for the subCRQ queues, causing
+ * a kernel warning
+ */
+ init_sub_crqs(adapter, 0);
+
+ reinit_completion(&adapter->init_done);
+ wait_for_completion(&adapter->init_done);
+
+ /* if init_sub_crqs is partially successful, retry */
+ while (!adapter->tx_scrq || !adapter->rx_scrq) {
+ init_sub_crqs(adapter, 1);
+
+ reinit_completion(&adapter->init_done);
+ wait_for_completion(&adapter->init_done);
+ }
+
+ netdev->real_num_tx_queues = adapter->req_tx_queues;
+
+ rc = register_netdev(netdev);
+ if (rc) {
+ dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
+ goto free_debugfs;
+ }
+ dev_info(&dev->dev, "ibmvnic registered\n");
+
+ return 0;
+
+free_debugfs:
+ if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
+ debugfs_remove_recursive(adapter->debugfs_dir);
+free_crq:
+ ibmvnic_release_crq_queue(adapter);
+free_netdev:
+ free_netdev(netdev);
+ return rc;
+}
+
+static int ibmvnic_remove(struct vio_dev *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(&dev->dev);
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
+ unregister_netdev(netdev);
+
+ release_sub_crqs(adapter);
+
+ ibmvnic_release_crq_queue(adapter);
+
+ if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
+ debugfs_remove_recursive(adapter->debugfs_dir);
+
+ if (adapter->ras_comps)
+ dma_free_coherent(&dev->dev,
+ adapter->ras_comp_num *
+ sizeof(struct ibmvnic_fw_component),
+ adapter->ras_comps, adapter->ras_comps_tok);
+
+ kfree(adapter->ras_comp_int);
+
+ free_netdev(netdev);
+ dev_set_drvdata(&dev->dev, NULL);
+
+ return 0;
+}
+
+static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
+{
+ struct net_device *netdev = dev_get_drvdata(&vdev->dev);
+ struct ibmvnic_adapter *adapter;
+ struct iommu_table *tbl;
+ unsigned long ret = 0;
+ int i;
+
+ tbl = get_iommu_table_base(&vdev->dev);
+
+	/* netdev inits at probe time along with the structures we need below */
+ if (!netdev)
+ return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
+
+ adapter = netdev_priv(netdev);
+
+ ret += PAGE_SIZE; /* the crq message queue */
+ ret += adapter->bounce_buffer_size;
+ ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
+
+ for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
+ ret += 4 * PAGE_SIZE; /* the scrq message queue */
+
+ for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ i++)
+ ret += adapter->rx_pool[i].size *
+ IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
+
+ return ret;
+}
+
+static int ibmvnic_resume(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ /* kick the interrupt handlers just in case we lost an interrupt */
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
+ adapter->rx_scrq[i]);
+
+ return 0;
+}
+
+static struct vio_device_id ibmvnic_device_table[] = {
+ {"network", "IBM,vnic"},
+ {"", "" }
+};
+MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
+
+static const struct dev_pm_ops ibmvnic_pm_ops = {
+ .resume = ibmvnic_resume
+};
+
+static struct vio_driver ibmvnic_driver = {
+ .id_table = ibmvnic_device_table,
+ .probe = ibmvnic_probe,
+ .remove = ibmvnic_remove,
+ .get_desired_dma = ibmvnic_get_desired_dma,
+ .name = ibmvnic_driver_name,
+ .pm = &ibmvnic_pm_ops,
+};
+
+/* module functions */
+static int __init ibmvnic_module_init(void)
+{
+ pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
+ IBMVNIC_DRIVER_VERSION);
+
+ return vio_register_driver(&ibmvnic_driver);
+}
+
+static void __exit ibmvnic_module_exit(void)
+{
+ vio_unregister_driver(&ibmvnic_driver);
+}
+
+module_init(ibmvnic_module_init);
+module_exit(ibmvnic_module_exit);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
new file mode 100644
index 0000000..1242925
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -0,0 +1,1046 @@
+/**************************************************************************/
+/* */
+/* IBM System i and System p Virtual NIC Device Driver */
+/* Copyright (C) 2014 IBM Corp. */
+/* Santiago Leon (santi_leon@yahoo.com) */
+/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
+/* John Allen (jallen@linux.vnet.ibm.com) */
+/* */
+/* This program is free software; you can redistribute it and/or modify */
+/* it under the terms of the GNU General Public License as published by */
+/* the Free Software Foundation; either version 2 of the License, or */
+/* (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU General Public License for more details. */
+/* */
+/* You should have received a copy of the GNU General Public License */
+/* along with this program. */
+/* */
+/* This module contains the implementation of a virtual ethernet device */
+/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN */
+/* option of the RS/6000 Platform Architecture to interface with virtual */
+/* ethernet NICs that are presented to the partition by the hypervisor. */
+/* */
+/**************************************************************************/
+
+#define IBMVNIC_NAME "ibmvnic"
+#define IBMVNIC_DRIVER_VERSION "1.0"
+#define IBMVNIC_INVALID_MAP -1
+#define IBMVNIC_STATS_TIMEOUT 1
+/* basic structures plus 100 2k buffers */
+#define IBMVNIC_IO_ENTITLEMENT_DEFAULT 610305
+
+/* Initial module_parameters */
+#define IBMVNIC_RX_WEIGHT 16
+/* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
+#define IBMVNIC_BUFFS_PER_POOL 100
+#define IBMVNIC_MAX_TX_QUEUES 5
+
+struct ibmvnic_login_buffer {
+ __be32 len;
+ __be32 version;
+#define INITIAL_VERSION_LB 1
+ __be32 num_txcomp_subcrqs;
+ __be32 off_txcomp_subcrqs;
+ __be32 num_rxcomp_subcrqs;
+ __be32 off_rxcomp_subcrqs;
+ __be32 login_rsp_ioba;
+ __be32 login_rsp_len;
+} __packed __aligned(8);
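+
+/* Login sketch (inferred from the fields above, not a verbatim excerpt from
+ * the driver): the buffer is DMA-mapped with arrays of TX/RX sub-CRQ handles
+ * at the off_* offsets and a response area described by login_rsp_ioba and
+ * login_rsp_len, then handed to firmware with a LOGIN CRQ. Roughly:
+ *
+ * buf->len = cpu_to_be32(buf_len);
+ * buf->version = cpu_to_be32(INITIAL_VERSION_LB);
+ * buf->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
+ * buf->off_txcomp_subcrqs = cpu_to_be32(sizeof(*buf));
+ * buf->login_rsp_ioba = cpu_to_be32(rsp_dma);
+ * buf->login_rsp_len = cpu_to_be32(rsp_len);
+ */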
+
+struct ibmvnic_login_rsp_buffer {
+ __be32 len;
+ __be32 version;
+#define INITIAL_VERSION_LRB 1
+ __be32 num_txsubm_subcrqs;
+ __be32 off_txsubm_subcrqs;
+ __be32 num_rxadd_subcrqs;
+ __be32 off_rxadd_subcrqs;
+ __be32 off_rxadd_buff_size;
+ __be32 num_supp_tx_desc;
+ __be32 off_supp_tx_desc;
+} __packed __aligned(8);
+
+struct ibmvnic_query_ip_offload_buffer {
+ __be32 len;
+ __be32 version;
+#define INITIAL_VERSION_IOB 1
+ u8 ipv4_chksum;
+ u8 ipv6_chksum;
+ u8 tcp_ipv4_chksum;
+ u8 tcp_ipv6_chksum;
+ u8 udp_ipv4_chksum;
+ u8 udp_ipv6_chksum;
+ u8 large_tx_ipv4;
+ u8 large_tx_ipv6;
+ u8 large_rx_ipv4;
+ u8 large_rx_ipv6;
+ u8 reserved1[14];
+ __be16 max_ipv4_header_size;
+ __be16 max_ipv6_header_size;
+ __be16 max_tcp_header_size;
+ __be16 max_udp_header_size;
+ __be32 max_large_tx_size;
+ __be32 max_large_rx_size;
+ u8 reserved2[16];
+ u8 ipv6_extension_header;
+#define IPV6_EH_NOT_SUPPORTED 0x00
+#define IPV6_EH_SUPPORTED_LIM 0x01
+#define IPV6_EH_SUPPORTED 0xFF
+ u8 tcp_pseudosum_req;
+#define TCP_PS_NOT_REQUIRED 0x00
+#define TCP_PS_REQUIRED 0x01
+ u8 reserved3[30];
+ __be16 num_ipv6_ext_headers;
+ __be32 off_ipv6_ext_headers;
+ u8 reserved4[154];
+} __packed __aligned(8);
+
+struct ibmvnic_control_ip_offload_buffer {
+ __be32 len;
+ __be32 version;
+#define INITIAL_VERSION_IOB 1
+ u8 ipv4_chksum;
+ u8 ipv6_chksum;
+ u8 tcp_ipv4_chksum;
+ u8 tcp_ipv6_chksum;
+ u8 udp_ipv4_chksum;
+ u8 udp_ipv6_chksum;
+ u8 large_tx_ipv4;
+ u8 large_tx_ipv6;
+ u8 bad_packet_rx;
+ u8 large_rx_ipv4;
+ u8 large_rx_ipv6;
+ u8 reserved4[111];
+} __packed __aligned(8);
+
+struct ibmvnic_fw_component {
+ u8 name[48];
+ __be32 trace_buff_size;
+ u8 correlator;
+ u8 trace_level;
+ u8 parent_correlator;
+ u8 error_check_level;
+ u8 trace_on;
+ u8 reserved[7];
+ u8 description[192];
+} __packed __aligned(8);
+
+struct ibmvnic_fw_trace_entry {
+ __be32 trace_id;
+ u8 num_valid_data;
+ u8 reserved[3];
+ __be64 pmc_registers;
+ __be64 timebase;
+ __be64 trace_data[5];
+} __packed __aligned(8);
+
+struct ibmvnic_statistics {
+ __be32 version;
+ __be32 promiscuous;
+ __be64 rx_packets;
+ __be64 rx_bytes;
+ __be64 tx_packets;
+ __be64 tx_bytes;
+ __be64 ucast_tx_packets;
+ __be64 ucast_rx_packets;
+ __be64 mcast_tx_packets;
+ __be64 mcast_rx_packets;
+ __be64 bcast_tx_packets;
+ __be64 bcast_rx_packets;
+ __be64 align_errors;
+ __be64 fcs_errors;
+ __be64 single_collision_frames;
+ __be64 multi_collision_frames;
+ __be64 sqe_test_errors;
+ __be64 deferred_tx;
+ __be64 late_collisions;
+ __be64 excess_collisions;
+ __be64 internal_mac_tx_errors;
+ __be64 carrier_sense;
+ __be64 too_long_frames;
+ __be64 internal_mac_rx_errors;
+ u8 reserved[72];
+} __packed __aligned(8);
+
+struct ibmvnic_acl_buffer {
+ __be32 len;
+ __be32 version;
+#define INITIAL_VERSION_IOB 1
+ u8 mac_acls_restrict;
+ u8 vlan_acls_restrict;
+ u8 reserved1[22];
+ __be32 num_mac_addrs;
+ __be32 offset_mac_addrs;
+ __be32 num_vlan_ids;
+ __be32 offset_vlan_ids;
+ u8 reserved2[80];
+} __packed __aligned(8);
+
+/* Descriptors have changed; how should this be defined? 1? 4? */
+
+#define IBMVNIC_TX_DESC_VERSIONS 3
+
+/* is this still needed? */
+struct ibmvnic_tx_comp_desc {
+ u8 first;
+ u8 num_comps;
+ __be16 rcs[5];
+ __be32 correlators[5];
+} __packed __aligned(8);
+
+/* Some flags that were included in the v0 descriptor, which is gone.
+ * They are only used for IBMVNIC_TCP_CHKSUM and IBMVNIC_UDP_CHKSUM,
+ * and only in an offload_flags variable that doesn't seem to be used
+ * anywhere, so they can probably be removed.
+ */
+
+#define IBMVNIC_TCP_CHKSUM 0x20
+#define IBMVNIC_UDP_CHKSUM 0x08
+
+#define IBMVNIC_MAX_FRAGS_PER_CRQ 3
+
+struct ibmvnic_tx_desc {
+ u8 first;
+ u8 type;
+
+#define IBMVNIC_TX_DESC 0x10
+ u8 n_crq_elem;
+ u8 n_sge;
+ u8 flags1;
+#define IBMVNIC_TX_COMP_NEEDED 0x80
+#define IBMVNIC_TX_CHKSUM_OFFLOAD 0x40
+#define IBMVNIC_TX_LSO 0x20
+#define IBMVNIC_TX_PROT_TCP 0x10
+#define IBMVNIC_TX_PROT_UDP 0x08
+#define IBMVNIC_TX_PROT_IPV4 0x04
+#define IBMVNIC_TX_PROT_IPV6 0x02
+#define IBMVNIC_TX_VLAN_PRESENT 0x01
+ u8 flags2;
+#define IBMVNIC_TX_VLAN_INSERT 0x80
+ __be16 mss;
+ u8 reserved[4];
+ __be32 correlator;
+ __be16 vlan_id;
+ __be16 dma_reg;
+ __be32 sge_len;
+ __be64 ioba;
+} __packed __aligned(8);
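+
+/* Flag usage sketch (illustrative, not a verbatim excerpt): a single
+ * checksum-offloaded TCP/IPv4 frame that wants a completion would be built
+ * roughly as
+ *
+ * desc.first = IBMVNIC_CRQ_CMD;
+ * desc.type = IBMVNIC_TX_DESC;
+ * desc.flags1 = IBMVNIC_TX_COMP_NEEDED | IBMVNIC_TX_CHKSUM_OFFLOAD |
+ * IBMVNIC_TX_PROT_TCP | IBMVNIC_TX_PROT_IPV4;
+ * desc.correlator = cpu_to_be32(index);
+ *
+ * with dma_reg/sge_len/ioba describing the DMA-mapped frame data.
+ */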
+
+struct ibmvnic_hdr_desc {
+ u8 first;
+ u8 type;
+#define IBMVNIC_HDR_DESC 0x11
+ u8 len;
+ u8 l2_len;
+ __be16 l3_len;
+ u8 l4_len;
+ u8 flag;
+ u8 data[24];
+} __packed __aligned(8);
+
+struct ibmvnic_hdr_ext_desc {
+ u8 first;
+ u8 type;
+#define IBMVNIC_HDR_EXT_DESC 0x12
+ u8 len;
+ u8 data[29];
+} __packed __aligned(8);
+
+struct ibmvnic_sge_desc {
+ u8 first;
+ u8 type;
+#define IBMVNIC_SGE_DESC 0x30
+ __be16 sge1_dma_reg;
+ __be32 sge1_len;
+ __be64 sge1_ioba;
+ __be16 reserved;
+ __be16 sge2_dma_reg;
+ __be32 sge2_len;
+ __be64 sge2_ioba;
+} __packed __aligned(8);
+
+struct ibmvnic_rx_comp_desc {
+ u8 first;
+ u8 flags;
+#define IBMVNIC_IP_CHKSUM_GOOD 0x80
+#define IBMVNIC_TCP_UDP_CHKSUM_GOOD 0x40
+#define IBMVNIC_END_FRAME 0x20
+#define IBMVNIC_EXACT_MC 0x10
+#define IBMVNIC_VLAN_STRIPPED 0x08
+ __be16 off_frame_data;
+ __be32 len;
+ __be64 correlator;
+ __be16 vlan_tci;
+ __be16 rc;
+ u8 reserved[12];
+} __packed __aligned(8);
+
+struct ibmvnic_generic_scrq {
+ u8 first;
+ u8 reserved[31];
+} __packed __aligned(8);
+
+struct ibmvnic_rx_buff_add_desc {
+ u8 first;
+ u8 reserved[7];
+ __be64 correlator;
+ __be32 ioba;
+ u8 map_id;
+ __be32 len:24;
+ u8 reserved2[8];
+} __packed __aligned(8);
+
+struct ibmvnic_rc {
+ u8 code; /* one of enum ibmvnic_rc_codes */
+ u8 detailed_data[3];
+} __packed __aligned(4);
+
+struct ibmvnic_generic_crq {
+ u8 first;
+ u8 cmd;
+ u8 params[10];
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
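+
+/* Every CRQ message is 16 bytes: "first" carries the transport type (one of
+ * enum ibmvnic_crq_type below), "cmd" selects the command (enum
+ * ibmvnic_commands), and on responses the trailing ibmvnic_rc reports the
+ * outcome. The structs that follow are typed views of this same layout.
+ */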
+
+struct ibmvnic_version_exchange {
+ u8 first;
+ u8 cmd;
+ __be16 version;
+#define IBMVNIC_INITIAL_VERSION 1
+ u8 reserved[8];
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
+struct ibmvnic_capability {
+ u8 first;
+ u8 cmd;
+ __be16 capability; /* one of ibmvnic_capabilities */
+ struct ibmvnic_rc rc;
+ __be32 number; /* FIX: should be __be64, but I'm getting the least
+ * significant word first
+ */
+} __packed __aligned(8);
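+
+/* Capability exchange sketch (inferred from the layout): the driver sends
+ * QUERY_CAPABILITY with "capability" set to one of enum ibmvnic_capabilities
+ * and reads "number" back from the *_RSP message, e.g.
+ *
+ * union ibmvnic_crq crq = {};
+ * crq.query_capability.first = IBMVNIC_CRQ_CMD;
+ * crq.query_capability.cmd = QUERY_CAPABILITY;
+ * crq.query_capability.capability = cpu_to_be16(REQ_MTU);
+ */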
+
+struct ibmvnic_login {
+ u8 first;
+ u8 cmd;
+ u8 reserved[6];
+ __be32 ioba;
+ __be32 len;
+} __packed __aligned(8);
+
+struct ibmvnic_phys_parms {
+ u8 first;
+ u8 cmd;
+ u8 flags1;
+#define IBMVNIC_EXTERNAL_LOOPBACK 0x80
+#define IBMVNIC_INTERNAL_LOOPBACK 0x40
+#define IBMVNIC_PROMISC 0x20
+#define IBMVNIC_PHYS_LINK_ACTIVE 0x10
+#define IBMVNIC_AUTONEG_DUPLEX 0x08
+#define IBMVNIC_FULL_DUPLEX 0x04
+#define IBMVNIC_HALF_DUPLEX 0x02
+#define IBMVNIC_CAN_CHG_PHYS_PARMS 0x01
+ u8 flags2;
+#define IBMVNIC_LOGICAL_LNK_ACTIVE 0x80
+ __be32 speed;
+#define IBMVNIC_AUTONEG 0x80
+#define IBMVNIC_10MBPS 0x40
+#define IBMVNIC_100MBPS 0x20
+#define IBMVNIC_1GBPS 0x10
+#define IBMVNIC_10GBPS 0x08
+ __be32 mtu;
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
+struct ibmvnic_logical_link_state {
+ u8 first;
+ u8 cmd;
+ u8 link_state;
+#define IBMVNIC_LOGICAL_LNK_DN 0x00
+#define IBMVNIC_LOGICAL_LNK_UP 0x01
+#define IBMVNIC_LOGICAL_LNK_QUERY 0xff
+ u8 reserved[9];
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
+struct ibmvnic_query_ip_offload {
+ u8 first;
+ u8 cmd;
+ u8 reserved[2];
+ __be32 len;
+ __be32 ioba;
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
+struct ibmvnic_control_ip_offload {
+ u8 first;
+ u8 cmd;
+ u8 reserved[2];
+ __be32 ioba;
+ __be32 len;
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
+struct ibmvnic_request_dump_size {
+ u8 first;
+ u8 cmd;
+ u8 reserved[6];
+ __be32 len;
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
+struct ibmvnic_request_dump {
+ u8 first;
+ u8 cmd;
+ u8 reserved1[2];
+ __be32 ioba;
+ __be32 len;
+ u8 reserved2[4];
+} __packed __aligned(8);
+
+struct ibmvnic_request_dump_rsp {
+ u8 first;
+ u8 cmd;
+ u8 reserved[6];
+ __be32 dumped_len;
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
+struct ibmvnic_request_ras_comp_num {
+ u8 first;
+ u8 cmd;
+ u8 reserved1[2];
+ __be32 num_components;
+ u8 reserved2[4];
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
+struct ibmvnic_request_ras_comps {
+ u8 first;
+ u8 cmd;
+ u8 reserved[2];
+ __be32 ioba;
+ __be32 len;
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
+struct ibmvnic_control_ras {
+ u8 first;
+ u8 cmd;
+ u8 correlator;
+ u8 level;
+ u8 op;
+#define IBMVNIC_TRACE_LEVEL 1
+#define IBMVNIC_ERROR_LEVEL 2
+#define IBMVNIC_TRACE_PAUSE 3
+#define IBMVNIC_TRACE_RESUME 4
+#define IBMVNIC_TRACE_ON 5
+#define IBMVNIC_TRACE_OFF 6
+#define IBMVNIC_CHG_TRACE_BUFF_SZ 7
+ u8 trace_buff_sz[3];
+ u8 reserved[4];
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
+struct ibmvnic_collect_fw_trace {
+ u8 first;
+ u8 cmd;
+ u8 correlator;
+ u8 reserved;
+ __be32 ioba;
+ __be32 len;
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
+struct ibmvnic_request_statistics {
+ u8 first;
+ u8 cmd;
+ u8 flags;
+#define IBMVNIC_PHYSICAL_PORT 0x80
+ u8 reserved1;
+ __be32 ioba;
+ __be32 len;
+ u8 reserved[4];
+} __packed __aligned(8);
+
+struct ibmvnic_request_debug_stats {
+ u8 first;
+ u8 cmd;
+ u8 reserved[2];
+ __be32 ioba;
+ __be32 len;
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
+struct ibmvnic_error_indication {
+ u8 first;
+ u8 cmd;
+ u8 flags;
+#define IBMVNIC_FATAL_ERROR 0x80
+ u8 reserved1;
+ __be32 error_id;
+ __be32 detail_error_sz;
+ __be16 error_cause;
+ u8 reserved2[2];
+} __packed __aligned(8);
+
+struct ibmvnic_request_error_info {
+ u8 first;
+ u8 cmd;
+ u8 reserved[2];
+ __be32 ioba;
+ __be32 len;
+ __be32 error_id;
+} __packed __aligned(8);
+
+struct ibmvnic_request_error_rsp {
+ u8 first;
+ u8 cmd;
+ u8 reserved[2];
+ __be32 error_id;
+ __be32 len;
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
+struct ibmvnic_link_state_indication {
+ u8 first;
+ u8 cmd;
+ u8 reserved1[2];
+ u8 phys_link_state;
+ u8 logical_link_state;
+ u8 reserved2[10];
+} __packed __aligned(8);
+
+struct ibmvnic_change_mac_addr {
+ u8 first;
+ u8 cmd;
+ u8 mac_addr[6];
+ struct ibmvnic_rc rc;
+ u8 reserved[4];
+} __packed __aligned(8);
+
+struct ibmvnic_multicast_ctrl {
+ u8 first;
+ u8 cmd;
+ u8 mac_addr[6];
+ u8 flags;
+#define IBMVNIC_ENABLE_MC 0x80
+#define IBMVNIC_DISABLE_MC 0x40
+#define IBMVNIC_ENABLE_ALL 0x20
+#define IBMVNIC_DISABLE_ALL 0x10
+ u8 reserved1;
+ __be16 reserved2; /* was num_enabled_mc_addr; */
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
+struct ibmvnic_get_vpd_size_rsp {
+ u8 first;
+ u8 cmd;
+ u8 reserved[2];
+ __be64 len;
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
+struct ibmvnic_get_vpd {
+ u8 first;
+ u8 cmd;
+ u8 reserved1[2];
+ __be32 ioba;
+ __be32 len;
+ u8 reserved[4];
+} __packed __aligned(8);
+
+struct ibmvnic_acl_change_indication {
+ u8 first;
+ u8 cmd;
+ __be16 change_type;
+#define IBMVNIC_MAC_ACL 0
+#define IBMVNIC_VLAN_ACL 1
+ u8 reserved[12];
+} __packed __aligned(8);
+
+struct ibmvnic_acl_query {
+ u8 first;
+ u8 cmd;
+ u8 reserved1[2];
+ __be32 ioba;
+ __be32 len;
+ u8 reserved2[4];
+} __packed __aligned(8);
+
+struct ibmvnic_tune {
+ u8 first;
+ u8 cmd;
+ u8 reserved1[2];
+ __be32 ioba;
+ __be32 len;
+ u8 reserved2[4];
+} __packed __aligned(8);
+
+struct ibmvnic_request_map {
+ u8 first;
+ u8 cmd;
+ u8 reserved1;
+ u8 map_id;
+ __be32 ioba;
+ __be32 len;
+ u8 reserved2[4];
+} __packed __aligned(8);
+
+struct ibmvnic_request_map_rsp {
+ u8 first;
+ u8 cmd;
+ u8 reserved1;
+ u8 map_id;
+ u8 reserved2[4];
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
+struct ibmvnic_request_unmap {
+ u8 first;
+ u8 cmd;
+ u8 reserved1;
+ u8 map_id;
+ u8 reserved2[12];
+} __packed __aligned(8);
+
+struct ibmvnic_request_unmap_rsp {
+ u8 first;
+ u8 cmd;
+ u8 reserved1;
+ u8 map_id;
+ u8 reserved2[8];
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
+struct ibmvnic_query_map {
+ u8 first;
+ u8 cmd;
+ u8 reserved[14];
+} __packed __aligned(8);
+
+struct ibmvnic_query_map_rsp {
+ u8 first;
+ u8 cmd;
+ u8 reserved;
+ u8 page_size;
+ __be32 tot_pages;
+ __be32 free_pages;
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
+union ibmvnic_crq {
+ struct ibmvnic_generic_crq generic;
+ struct ibmvnic_version_exchange version_exchange;
+ struct ibmvnic_version_exchange version_exchange_rsp;
+ struct ibmvnic_capability query_capability;
+ struct ibmvnic_capability query_capability_rsp;
+ struct ibmvnic_capability request_capability;
+ struct ibmvnic_capability request_capability_rsp;
+ struct ibmvnic_login login;
+ struct ibmvnic_generic_crq login_rsp;
+ struct ibmvnic_phys_parms query_phys_parms;
+ struct ibmvnic_phys_parms query_phys_parms_rsp;
+ struct ibmvnic_phys_parms query_phys_capabilities;
+ struct ibmvnic_phys_parms query_phys_capabilities_rsp;
+ struct ibmvnic_phys_parms set_phys_parms;
+ struct ibmvnic_phys_parms set_phys_parms_rsp;
+ struct ibmvnic_logical_link_state logical_link_state;
+ struct ibmvnic_logical_link_state logical_link_state_rsp;
+ struct ibmvnic_query_ip_offload query_ip_offload;
+ struct ibmvnic_query_ip_offload query_ip_offload_rsp;
+ struct ibmvnic_control_ip_offload control_ip_offload;
+ struct ibmvnic_control_ip_offload control_ip_offload_rsp;
+ struct ibmvnic_request_dump_size request_dump_size;
+ struct ibmvnic_request_dump_size request_dump_size_rsp;
+ struct ibmvnic_request_dump request_dump;
+ struct ibmvnic_request_dump_rsp request_dump_rsp;
+ struct ibmvnic_request_ras_comp_num request_ras_comp_num;
+ struct ibmvnic_request_ras_comp_num request_ras_comp_num_rsp;
+ struct ibmvnic_request_ras_comps request_ras_comps;
+ struct ibmvnic_request_ras_comps request_ras_comps_rsp;
+ struct ibmvnic_control_ras control_ras;
+ struct ibmvnic_control_ras control_ras_rsp;
+ struct ibmvnic_collect_fw_trace collect_fw_trace;
+ struct ibmvnic_collect_fw_trace collect_fw_trace_rsp;
+ struct ibmvnic_request_statistics request_statistics;
+ struct ibmvnic_generic_crq request_statistics_rsp;
+ struct ibmvnic_request_debug_stats request_debug_stats;
+ struct ibmvnic_request_debug_stats request_debug_stats_rsp;
+ struct ibmvnic_error_indication error_indication;
+ struct ibmvnic_request_error_info request_error_info;
+ struct ibmvnic_request_error_rsp request_error_rsp;
+ struct ibmvnic_link_state_indication link_state_indication;
+ struct ibmvnic_change_mac_addr change_mac_addr;
+ struct ibmvnic_change_mac_addr change_mac_addr_rsp;
+ struct ibmvnic_multicast_ctrl multicast_ctrl;
+ struct ibmvnic_multicast_ctrl multicast_ctrl_rsp;
+ struct ibmvnic_generic_crq get_vpd_size;
+ struct ibmvnic_get_vpd_size_rsp get_vpd_size_rsp;
+ struct ibmvnic_get_vpd get_vpd;
+ struct ibmvnic_generic_crq get_vpd_rsp;
+ struct ibmvnic_acl_change_indication acl_change_indication;
+ struct ibmvnic_acl_query acl_query;
+ struct ibmvnic_generic_crq acl_query_rsp;
+ struct ibmvnic_tune tune;
+ struct ibmvnic_generic_crq tune_rsp;
+ struct ibmvnic_request_map request_map;
+ struct ibmvnic_request_map_rsp request_map_rsp;
+ struct ibmvnic_request_unmap request_unmap;
+ struct ibmvnic_request_unmap_rsp request_unmap_rsp;
+ struct ibmvnic_query_map query_map;
+ struct ibmvnic_query_map_rsp query_map_rsp;
+};
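+
+/* A received message is dispatched on generic.first/generic.cmd and then
+ * read through the matching member of this union, e.g.
+ * crq->request_capability_rsp.number for a REQUEST_CAPABILITY_RSP.
+ */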
+
+enum ibmvnic_rc_codes {
+ SUCCESS = 0,
+ PARTIALSUCCESS = 1,
+ PERMISSION = 2,
+ NOMEMORY = 3,
+ PARAMETER = 4,
+ UNKNOWNCOMMAND = 5,
+ ABORTED = 6,
+ INVALIDSTATE = 7,
+ INVALIDIOBA = 8,
+ INVALIDLENGTH = 9,
+ UNSUPPORTEDOPTION = 10,
+};
+
+enum ibmvnic_capabilities {
+ MIN_TX_QUEUES = 1,
+ MIN_RX_QUEUES = 2,
+ MIN_RX_ADD_QUEUES = 3,
+ MAX_TX_QUEUES = 4,
+ MAX_RX_QUEUES = 5,
+ MAX_RX_ADD_QUEUES = 6,
+ REQ_TX_QUEUES = 7,
+ REQ_RX_QUEUES = 8,
+ REQ_RX_ADD_QUEUES = 9,
+ MIN_TX_ENTRIES_PER_SUBCRQ = 10,
+ MIN_RX_ADD_ENTRIES_PER_SUBCRQ = 11,
+ MAX_TX_ENTRIES_PER_SUBCRQ = 12,
+ MAX_RX_ADD_ENTRIES_PER_SUBCRQ = 13,
+ REQ_TX_ENTRIES_PER_SUBCRQ = 14,
+ REQ_RX_ADD_ENTRIES_PER_SUBCRQ = 15,
+ TCP_IP_OFFLOAD = 16,
+ PROMISC_REQUESTED = 17,
+ PROMISC_SUPPORTED = 18,
+ MIN_MTU = 19,
+ MAX_MTU = 20,
+ REQ_MTU = 21,
+ MAX_MULTICAST_FILTERS = 22,
+ VLAN_HEADER_INSERTION = 23,
+ MAX_TX_SG_ENTRIES = 25,
+ RX_SG_SUPPORTED = 26,
+ RX_SG_REQUESTED = 27,
+ OPT_TX_COMP_SUB_QUEUES = 28,
+ OPT_RX_COMP_QUEUES = 29,
+ OPT_RX_BUFADD_Q_PER_RX_COMP_Q = 30,
+ OPT_TX_ENTRIES_PER_SUBCRQ = 31,
+ OPT_RXBA_ENTRIES_PER_SUBCRQ = 32,
+ TX_RX_DESC_REQ = 33,
+};
+
+enum ibmvnic_error_cause {
+ ADAPTER_PROBLEM = 0,
+ BUS_PROBLEM = 1,
+ FW_PROBLEM = 2,
+ DD_PROBLEM = 3,
+ EEH_RECOVERY = 4,
+ FW_UPDATED = 5,
+ LOW_MEMORY = 6,
+};
+
+enum ibmvnic_commands {
+ VERSION_EXCHANGE = 0x01,
+ VERSION_EXCHANGE_RSP = 0x81,
+ QUERY_CAPABILITY = 0x02,
+ QUERY_CAPABILITY_RSP = 0x82,
+ REQUEST_CAPABILITY = 0x03,
+ REQUEST_CAPABILITY_RSP = 0x83,
+ LOGIN = 0x04,
+ LOGIN_RSP = 0x84,
+ QUERY_PHYS_PARMS = 0x05,
+ QUERY_PHYS_PARMS_RSP = 0x85,
+ QUERY_PHYS_CAPABILITIES = 0x06,
+ QUERY_PHYS_CAPABILITIES_RSP = 0x86,
+ SET_PHYS_PARMS = 0x07,
+ SET_PHYS_PARMS_RSP = 0x87,
+ ERROR_INDICATION = 0x08,
+ REQUEST_ERROR_INFO = 0x09,
+ REQUEST_ERROR_RSP = 0x89,
+ REQUEST_DUMP_SIZE = 0x0A,
+ REQUEST_DUMP_SIZE_RSP = 0x8A,
+ REQUEST_DUMP = 0x0B,
+ REQUEST_DUMP_RSP = 0x8B,
+ LOGICAL_LINK_STATE = 0x0C,
+ LOGICAL_LINK_STATE_RSP = 0x8C,
+ REQUEST_STATISTICS = 0x0D,
+ REQUEST_STATISTICS_RSP = 0x8D,
+ REQUEST_RAS_COMP_NUM = 0x0E,
+ REQUEST_RAS_COMP_NUM_RSP = 0x8E,
+ REQUEST_RAS_COMPS = 0x0F,
+ REQUEST_RAS_COMPS_RSP = 0x8F,
+ CONTROL_RAS = 0x10,
+ CONTROL_RAS_RSP = 0x90,
+ COLLECT_FW_TRACE = 0x11,
+ COLLECT_FW_TRACE_RSP = 0x91,
+ LINK_STATE_INDICATION = 0x12,
+ CHANGE_MAC_ADDR = 0x13,
+ CHANGE_MAC_ADDR_RSP = 0x93,
+ MULTICAST_CTRL = 0x14,
+ MULTICAST_CTRL_RSP = 0x94,
+ GET_VPD_SIZE = 0x15,
+ GET_VPD_SIZE_RSP = 0x95,
+ GET_VPD = 0x16,
+ GET_VPD_RSP = 0x96,
+ TUNE = 0x17,
+ TUNE_RSP = 0x97,
+ QUERY_IP_OFFLOAD = 0x18,
+ QUERY_IP_OFFLOAD_RSP = 0x98,
+ CONTROL_IP_OFFLOAD = 0x19,
+ CONTROL_IP_OFFLOAD_RSP = 0x99,
+ ACL_CHANGE_INDICATION = 0x1A,
+ ACL_QUERY = 0x1B,
+ ACL_QUERY_RSP = 0x9B,
+ REQUEST_DEBUG_STATS = 0x1C,
+ REQUEST_DEBUG_STATS_RSP = 0x9C,
+ QUERY_MAP = 0x1D,
+ QUERY_MAP_RSP = 0x9D,
+ REQUEST_MAP = 0x1E,
+ REQUEST_MAP_RSP = 0x9E,
+ REQUEST_UNMAP = 0x1F,
+ REQUEST_UNMAP_RSP = 0x9F,
+ VLAN_CTRL = 0x20,
+ VLAN_CTRL_RSP = 0xA0,
+};
+
+enum ibmvnic_crq_type {
+ IBMVNIC_CRQ_CMD = 0x80,
+ IBMVNIC_CRQ_CMD_RSP = 0x80,
+ IBMVNIC_CRQ_INIT_CMD = 0xC0,
+ IBMVNIC_CRQ_INIT_RSP = 0xC0,
+ IBMVNIC_CRQ_XPORT_EVENT = 0xFF,
+};
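+
+/* Note: commands and their responses share a type value (0x80, and 0xC0 for
+ * init); presumably the direction of the queue, not the type byte, tells
+ * them apart.
+ */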
+
+enum ibmvnic_crq_format {
+ IBMVNIC_CRQ_INIT = 0x01,
+ IBMVNIC_CRQ_INIT_COMPLETE = 0x02,
+ IBMVNIC_PARTITION_MIGRATED = 0x06,
+};
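+
+/* CRQ setup handshake (an assumption based on other pSeries virtual I/O
+ * drivers): each side sends an INIT format message and answers the peer's
+ * with INIT_COMPLETE; PARTITION_MIGRATED arrives as an XPORT event after an
+ * LPAR migration, after which the queue must be re-registered.
+ */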
+
+struct ibmvnic_crq_queue {
+ union ibmvnic_crq *msgs;
+ int size, cur;
+ dma_addr_t msg_token;
+ spinlock_t lock;
+};
+
+union sub_crq {
+ struct ibmvnic_generic_scrq generic;
+ struct ibmvnic_tx_comp_desc tx_comp;
+ struct ibmvnic_tx_desc v1;
+ struct ibmvnic_hdr_desc hdr;
+ struct ibmvnic_hdr_ext_desc hdr_ext;
+ struct ibmvnic_sge_desc sge;
+ struct ibmvnic_rx_comp_desc rx_comp;
+ struct ibmvnic_rx_buff_add_desc rx_add;
+};
+
+struct ibmvnic_sub_crq_queue {
+ union sub_crq *msgs;
+ int size, cur;
+ dma_addr_t msg_token;
+ unsigned long crq_num;
+ unsigned long hw_irq;
+ unsigned int irq;
+ unsigned int pool_index;
+ int scrq_num;
+ spinlock_t lock;
+ struct sk_buff *rx_skb_top;
+ struct ibmvnic_adapter *adapter;
+};
+
+struct ibmvnic_long_term_buff {
+ unsigned char *buff;
+ dma_addr_t addr;
+ u64 size;
+ u8 map_id;
+};
+
+struct ibmvnic_tx_buff {
+ struct sk_buff *skb;
+ dma_addr_t data_dma[IBMVNIC_MAX_FRAGS_PER_CRQ];
+ unsigned int data_len[IBMVNIC_MAX_FRAGS_PER_CRQ];
+ int index;
+ int pool_index;
+ bool last_frag;
+ bool used_bounce;
+};
+
+struct ibmvnic_tx_pool {
+ struct ibmvnic_tx_buff *tx_buff;
+ int *free_map;
+ int consumer_index;
+ int producer_index;
+ wait_queue_head_t ibmvnic_tx_comp_q;
+ struct task_struct *work_thread;
+ struct ibmvnic_long_term_buff long_term_buff;
+};
+
+struct ibmvnic_rx_buff {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned char *data;
+ int size;
+ int pool_index;
+};
+
+struct ibmvnic_rx_pool {
+ struct ibmvnic_rx_buff *rx_buff;
+ int size;
+ int index;
+ int buff_size;
+ atomic_t available;
+ int *free_map;
+ int next_free;
+ int next_alloc;
+ int active;
+ struct ibmvnic_long_term_buff long_term_buff;
+};
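+
+/* Free-map sketch (a plausible reading of the fields; the .c side is
+ * authoritative): free_map holds indices into rx_buff, the replenish path
+ * consumes entries at next_free and bumps "available" as buffers are posted
+ * to firmware, and the RX completion path recycles indices at next_alloc.
+ */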
+
+struct ibmvnic_error_buff {
+ char *buff;
+ dma_addr_t dma;
+ int len;
+ struct list_head list;
+ __be32 error_id;
+};
+
+struct ibmvnic_fw_comp_internal {
+ struct ibmvnic_adapter *adapter;
+ int num;
+ struct debugfs_blob_wrapper desc_blob;
+ int paused;
+};
+
+struct ibmvnic_inflight_cmd {
+ union ibmvnic_crq crq;
+ struct list_head list;
+};
+
+struct ibmvnic_adapter {
+ struct vio_dev *vdev;
+ struct net_device *netdev;
+ struct ibmvnic_crq_queue crq;
+ u8 mac_addr[ETH_ALEN];
+ struct ibmvnic_query_ip_offload_buffer ip_offload_buf;
+ dma_addr_t ip_offload_tok;
+ struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
+ dma_addr_t ip_offload_ctrl_tok;
+ bool migrated;
+ u32 msg_enable;
+ void *bounce_buffer;
+ int bounce_buffer_size;
+ dma_addr_t bounce_buffer_dma;
+
+ /* Statistics */
+ struct net_device_stats net_stats;
+ struct ibmvnic_statistics stats;
+ dma_addr_t stats_token;
+ struct completion stats_done;
+ spinlock_t stats_lock;
+ int replenish_no_mem;
+ int replenish_add_buff_success;
+ int replenish_add_buff_failure;
+ int replenish_task_cycles;
+ int tx_send_failed;
+ int tx_map_failed;
+
+ int phys_link_state;
+ int logical_link_state;
+
+ /* login data */
+ struct ibmvnic_login_buffer *login_buf;
+ dma_addr_t login_buf_token;
+ int login_buf_sz;
+
+ struct ibmvnic_login_rsp_buffer *login_rsp_buf;
+ dma_addr_t login_rsp_buf_token;
+ int login_rsp_buf_sz;
+
+ atomic_t running_cap_queries;
+
+ struct ibmvnic_sub_crq_queue **tx_scrq;
+ struct ibmvnic_sub_crq_queue **rx_scrq;
+ int requested_caps;
+
+ /* rx structs */
+ struct napi_struct *napi;
+ struct ibmvnic_rx_pool *rx_pool;
+ u64 promisc;
+
+ struct ibmvnic_tx_pool *tx_pool;
+ bool closing;
+ struct completion init_done;
+
+ struct list_head errors;
+ spinlock_t error_list_lock;
+
+ /* debugfs */
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_dump;
+ struct completion fw_done;
+ char *dump_data;
+ dma_addr_t dump_data_token;
+ int dump_data_size;
+ int ras_comp_num;
+ struct ibmvnic_fw_component *ras_comps;
+ struct ibmvnic_fw_comp_internal *ras_comp_int;
+ dma_addr_t ras_comps_tok;
+ struct dentry *ras_comps_ent;
+
+ /* in-flight commands that allocate and/or map memory */
+ struct list_head inflight;
+ spinlock_t inflight_lock;
+
+ /* partner capabilities */
+ u64 min_tx_queues;
+ u64 min_rx_queues;
+ u64 min_rx_add_queues;
+ u64 max_tx_queues;
+ u64 max_rx_queues;
+ u64 max_rx_add_queues;
+ u64 req_tx_queues;
+ u64 req_rx_queues;
+ u64 req_rx_add_queues;
+ u64 min_tx_entries_per_subcrq;
+ u64 min_rx_add_entries_per_subcrq;
+ u64 max_tx_entries_per_subcrq;
+ u64 max_rx_add_entries_per_subcrq;
+ u64 req_tx_entries_per_subcrq;
+ u64 req_rx_add_entries_per_subcrq;
+ u64 tcp_ip_offload;
+ u64 promisc_requested;
+ u64 promisc_supported;
+ u64 min_mtu;
+ u64 max_mtu;
+ u64 req_mtu;
+ u64 max_multicast_filters;
+ u64 vlan_header_insertion;
+ u64 max_tx_sg_entries;
+ u64 rx_sg_supported;
+ u64 rx_sg_requested;
+ u64 opt_tx_comp_sub_queues;
+ u64 opt_rx_comp_queues;
+ u64 opt_rx_bufadd_q_per_rx_comp_q;
+ u64 opt_tx_entries_per_subcrq;
+ u64 opt_rxba_entries_per_subcrq;
+ __be64 tx_rx_desc_req;
+ u8 map_id;
+};
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 75ff109..b243c3c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -42,7 +42,7 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/* single workqueue for entire fm10k driver */
-struct workqueue_struct *fm10k_workqueue = NULL;
+struct workqueue_struct *fm10k_workqueue;
/**
* fm10k_init_module - Driver Registration Routine
@@ -56,8 +56,7 @@ static int __init fm10k_init_module(void)
pr_info("%s\n", fm10k_copyright);
/* create driver workqueue */
- if (!fm10k_workqueue)
- fm10k_workqueue = create_workqueue("fm10k");
+ fm10k_workqueue = create_workqueue("fm10k");
fm10k_dbg_init();
@@ -80,7 +79,6 @@ static void __exit fm10k_exit_module(void)
/* destroy driver workqueue */
flush_workqueue(fm10k_workqueue);
destroy_workqueue(fm10k_workqueue);
- fm10k_workqueue = NULL;
}
module_exit(fm10k_exit_module);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index c7fea47..98202c3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -57,7 +57,7 @@ static u16 fm10k_fifo_unused(struct fm10k_mbx_fifo *fifo)
}
/**
- * fm10k_fifo_empty - Test to verify if fifo is empty
+ * fm10k_fifo_empty - Test to verify if FIFO is empty
* @fifo: pointer to FIFO
*
* This function returns true if the FIFO is empty, else false
@@ -72,7 +72,7 @@ static bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo)
* @fifo: pointer to FIFO
* @offset: offset to add to head
*
- * This function returns the indices into the fifo based on head + offset
+ * This function returns the indices into the FIFO based on head + offset
**/
static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
{
@@ -84,7 +84,7 @@ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
* @fifo: pointer to FIFO
* @offset: offset to add to tail
*
- * This function returns the indices into the fifo based on tail + offset
+ * This function returns the indices into the FIFO based on tail + offset
**/
static u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
{
@@ -160,7 +160,7 @@ static u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail)
/**
* fm10k_mbx_tail_add - Determine new tail value with added offset
* @mbx: pointer to mailbox
- * @offset: length to add to head offset
+ * @offset: length to add to tail offset
*
* This function takes the local tail index and recomputes it for
* a given length added as an offset.
@@ -176,7 +176,7 @@ static u16 fm10k_mbx_tail_add(struct fm10k_mbx_info *mbx, u16 offset)
/**
* fm10k_mbx_tail_sub - Determine new tail value with subtracted offset
* @mbx: pointer to mailbox
- * @offset: length to add to head offset
+ * @offset: length to subtract from tail offset
*
* This function takes the local tail index and recomputes it for
* a given length subtracted as an offset.
@@ -240,7 +240,7 @@ static u16 fm10k_mbx_pushed_tail_len(struct fm10k_mbx_info *mbx)
}
/**
- * fm10k_fifo_write_copy - pulls data off of msg and places it in fifo
+ * fm10k_fifo_write_copy - pulls data off of msg and places it in FIFO
* @fifo: pointer to FIFO
* @msg: message array to populate
* @tail_offset: additional offset to add to tail pointer
@@ -336,6 +336,7 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len)
/**
* fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem
+ * @hw: pointer to hardware structure
* @mbx: pointer to mailbox
*
* This function will take a section of the Tx FIFO and copy it into the
@@ -711,7 +712,7 @@ static bool fm10k_mbx_tx_complete(struct fm10k_mbx_info *mbx)
* @hw: pointer to hardware structure
* @mbx: pointer to mailbox
*
- * This function dequeues messages and hands them off to the tlv parser.
+ * This function dequeues messages and hands them off to the TLV parser.
* It will return the number of messages processed when called.
**/
static u16 fm10k_mbx_dequeue_rx(struct fm10k_hw *hw,
@@ -924,7 +925,7 @@ static void fm10k_mbx_create_fake_disconnect_hdr(struct fm10k_mbx_info *mbx)
}
/**
- * fm10k_mbx_create_error_msg - Generate a error message
+ * fm10k_mbx_create_error_msg - Generate an error message
* @mbx: pointer to mailbox
* @err: local error encountered
*
@@ -957,7 +958,6 @@ static void fm10k_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err)
/**
* fm10k_mbx_validate_msg_hdr - Validate common fields in the message header
* @mbx: pointer to mailbox
- * @msg: message array to read
*
* This function will parse up the fields in the mailbox header and return
* an error if the header contains any of a number of invalid configurations
@@ -1021,11 +1021,12 @@ static s32 fm10k_mbx_validate_msg_hdr(struct fm10k_mbx_info *mbx)
/**
* fm10k_mbx_create_reply - Generate reply based on state and remote head
+ * @hw: pointer to hardware structure
* @mbx: pointer to mailbox
* @head: acknowledgement number
*
* This function will generate an outgoing message based on the current
- * mailbox state and the remote fifo head. It will return the length
+ * mailbox state and the remote FIFO head. It will return the length
* of the outgoing message excluding header on success, and a negative value
* on error.
**/
@@ -1151,8 +1152,8 @@ static void fm10k_mbx_connect_reset(struct fm10k_mbx_info *mbx)
/**
* fm10k_mbx_process_connect - Process connect header
+ * @hw: pointer to hardware structure
* @mbx: pointer to mailbox
- * @msg: message array to process
*
* This function will read an incoming connect header and reply with the
* appropriate message. It will return a value indicating the number of
@@ -1198,6 +1199,7 @@ static s32 fm10k_mbx_process_connect(struct fm10k_hw *hw,
/**
* fm10k_mbx_process_data - Process data header
+ * @hw: pointer to hardware structure
* @mbx: pointer to mailbox
*
* This function will read an incoming data header and reply with the
@@ -1239,6 +1241,7 @@ static s32 fm10k_mbx_process_data(struct fm10k_hw *hw,
/**
* fm10k_mbx_process_disconnect - Process disconnect header
+ * @hw: pointer to hardware structure
* @mbx: pointer to mailbox
*
* This function will read an incoming disconnect header and reply with the
@@ -1291,6 +1294,7 @@ static s32 fm10k_mbx_process_disconnect(struct fm10k_hw *hw,
/**
* fm10k_mbx_process_error - Process error header
+ * @hw: pointer to hardware structure
* @mbx: pointer to mailbox
*
* This function will read an incoming error header and reply with the
@@ -1560,7 +1564,7 @@ static s32 fm10k_mbx_register_handlers(struct fm10k_mbx_info *mbx,
* @id: ID reference for PF as it supports up to 64 PF/VF mailboxes
*
* This function initializes the mailbox for use. It will split the
- * buffer provided an use that th populate both the Tx and Rx FIFO by
+ * buffer provided and use that to populate both the Tx and Rx FIFO by
* evenly splitting it. In order to allow for easy masking of head/tail
* the value reported in size must be a power of 2 and is reported in
* DWORDs, not bytes. Any invalid values will cause the mailbox to return
@@ -1637,7 +1641,7 @@ s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
* fm10k_sm_mbx_create_data_hdr - Generate a mailbox header for local FIFO
* @mbx: pointer to mailbox
*
- * This function returns a connection mailbox header
+ * This function returns a data mailbox header
**/
static void fm10k_sm_mbx_create_data_hdr(struct fm10k_mbx_info *mbx)
{
@@ -1730,8 +1734,6 @@ static s32 fm10k_sm_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
fm10k_sm_mbx_create_connect_hdr(mbx, 0);
fm10k_mbx_write(hw, mbx);
- /* enable interrupt and notify other party of new message */
-
return 0;
}
@@ -1775,7 +1777,7 @@ static void fm10k_sm_mbx_disconnect(struct fm10k_hw *hw,
}
/**
- * fm10k_mbx_validate_fifo_hdr - Validate fields in the remote FIFO header
+ * fm10k_sm_mbx_validate_fifo_hdr - Validate fields in the remote FIFO header
* @mbx: pointer to mailbox
*
* This function will parse up the fields in the mailbox header and return
@@ -1853,7 +1855,7 @@ static void fm10k_sm_mbx_process_error(struct fm10k_mbx_info *mbx)
}
/**
- * fm10k_sm_mbx_create_error_message - Process an error in FIFO hdr
+ * fm10k_sm_mbx_create_error_msg - Process an error in FIFO header
* @mbx: pointer to mailbox
* @err: local error encountered
*
@@ -1883,6 +1885,7 @@ static void fm10k_sm_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err)
* fm10k_sm_mbx_receive - Take message from Rx mailbox FIFO and put it in Rx
* @hw: pointer to hardware structure
* @mbx: pointer to mailbox
+ * @tail: tail index of message
*
* This function will dequeue one message from the Rx switch manager mailbox
* FIFO and place it in the Rx mailbox FIFO for processing by software.
@@ -1922,6 +1925,7 @@ static s32 fm10k_sm_mbx_receive(struct fm10k_hw *hw,
* fm10k_sm_mbx_transmit - Take message from Tx and put it in Tx mailbox FIFO
* @hw: pointer to hardware structure
* @mbx: pointer to mailbox
+ * @head: head index of message
*
* This function will dequeue one message from the Tx mailbox FIFO and place
* it in the Tx switch manager mailbox FIFO for processing by hardware.
@@ -1961,11 +1965,12 @@ static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw,
/**
* fm10k_sm_mbx_create_reply - Generate reply based on state and remote head
+ * @hw: pointer to hardware structure
* @mbx: pointer to mailbox
* @head: acknowledgement number
*
* This function will generate an outgoing message based on the current
- * mailbox state and the remote fifo head. It will return the length
+ * mailbox state and the remote FIFO head. It will return the length
* of the outgoing message excluding header on success, and a negative value
* on error.
**/
@@ -2077,7 +2082,7 @@ send_reply:
}
/**
- * fm10k_sm_mbx_process - Process mailbox switch mailbox interrupt
+ * fm10k_sm_mbx_process - Process switch manager mailbox interrupt
* @hw: pointer to hardware structure
* @mbx: pointer to mailbox
*
@@ -2133,7 +2138,12 @@ fifo_err:
* @mbx: pointer to mailbox
* @msg_data: handlers for mailbox events
*
- * This function for now is used to stub out the PF/SM mailbox
+ * This function initializes the PF/SM mailbox for use. It will split the
+ * buffer provided and use that to populate both the Tx and Rx FIFO by
+ * evenly splitting it. In order to allow for easy masking of head/tail
+ * the value reported in size must be a power of 2 and is reported in
+ * DWORDs, not bytes. Any invalid values will cause the mailbox to return
+ * error.
**/
s32 fm10k_sm_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
const struct fm10k_msg_data *msg_data)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
index c4f18a8..245a0a3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
@@ -128,11 +128,11 @@ enum fm10k_mbx_state {
* The maximum message size is provided during connect to avoid
* jamming the mailbox with messages that do not fit.
* Err_no: Error number - Applies only to error headers
- * The error number provides a indication of the type of error
+ * The error number provides an indication of the type of error
* experienced.
*/
-/* macros for retriving and setting header values */
+/* macros for retrieving and setting header values */
#define FM10K_MSG_HDR_MASK(name) \
((0x1u << FM10K_MSG_##name##_SIZE) - 1)
#define FM10K_MSG_HDR_FIELD_SET(value, name) \
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 83ddf36..662569d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -20,7 +20,7 @@
#include "fm10k.h"
#include <linux/vmalloc.h>
-#if IS_ENABLED(CONFIG_FM10K_VXLAN)
+#ifdef CONFIG_FM10K_VXLAN
#include <net/vxlan.h>
#endif /* CONFIG_FM10K_VXLAN */
@@ -556,11 +556,11 @@ int fm10k_open(struct net_device *netdev)
if (err)
goto err_set_queues;
-#if IS_ENABLED(CONFIG_FM10K_VXLAN)
+#ifdef CONFIG_FM10K_VXLAN
/* update VXLAN port configuration */
vxlan_get_rx_port(netdev);
-
#endif
+
fm10k_up(interface);
return 0;
@@ -1153,6 +1153,7 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
int fm10k_setup_tc(struct net_device *dev, u8 tc)
{
struct fm10k_intfc *interface = netdev_priv(dev);
+ int err;
/* Currently only the PF supports priority classes */
if (tc && (interface->hw.mac.type != fm10k_mac_pf))
@@ -1177,17 +1178,30 @@ int fm10k_setup_tc(struct net_device *dev, u8 tc)
netdev_reset_tc(dev);
netdev_set_num_tc(dev, tc);
- fm10k_init_queueing_scheme(interface);
+ err = fm10k_init_queueing_scheme(interface);
+ if (err)
+ goto err_queueing_scheme;
- fm10k_mbx_request_irq(interface);
+ err = fm10k_mbx_request_irq(interface);
+ if (err)
+ goto err_mbx_irq;
- if (netif_running(dev))
- fm10k_open(dev);
+ err = netif_running(dev) ? fm10k_open(dev) : 0;
+ if (err)
+ goto err_open;
/* flag to indicate SWPRI has yet to be updated */
interface->flags |= FM10K_FLAG_SWPRI_CONFIG;
return 0;
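+
+ /* Unwind in reverse order of setup: drop the mailbox IRQ, tear down
+ * the queueing scheme, then detach the device so a later reset can
+ * recover it.
+ */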
+err_open:
+ fm10k_mbx_free_irq(interface);
+err_mbx_irq:
+ fm10k_clear_queueing_scheme(interface);
+err_queueing_scheme:
+ netif_device_detach(dev);
+
+ return err;
}
static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 020f6dc..4eb7a6f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -186,7 +186,13 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
}
/* reassociate interrupts */
- fm10k_mbx_request_irq(interface);
+ err = fm10k_mbx_request_irq(interface);
+ if (err)
+ goto err_mbx_irq;
+
+ err = fm10k_hw_ready(interface);
+ if (err)
+ goto err_open;
/* update hardware address for VFs if perm_addr has changed */
if (hw->mac.type == fm10k_mac_vf) {
@@ -206,14 +212,23 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
/* reset clock */
fm10k_ts_reset(interface);
- if (netif_running(netdev))
- fm10k_open(netdev);
+ err = netif_running(netdev) ? fm10k_open(netdev) : 0;
+ if (err)
+ goto err_open;
fm10k_iov_resume(interface->pdev);
+ rtnl_unlock();
+
+ clear_bit(__FM10K_RESETTING, &interface->state);
+
+ return;
+err_open:
+ fm10k_mbx_free_irq(interface);
+err_mbx_irq:
+ fm10k_clear_queueing_scheme(interface);
reinit_err:
- if (err)
- netif_device_detach(netdev);
+ netif_device_detach(netdev);
rtnl_unlock();
@@ -897,7 +912,7 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
fm10k_mbx_unlock(interface);
}
- hw->mac.get_host_state = 1;
+ hw->mac.get_host_state = true;
fm10k_service_event_schedule(interface);
return IRQ_HANDLED;
@@ -1113,7 +1128,7 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
}
/* we should validate host state after interrupt event */
- hw->mac.get_host_state = 1;
+ hw->mac.get_host_state = true;
/* validate host state, and handle VF mailboxes in the service task */
fm10k_service_event_schedule(interface);
@@ -1176,7 +1191,7 @@ static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results,
/* MAC was changed so we need reset */
if (is_valid_ether_addr(hw->mac.perm_addr) &&
- memcmp(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN))
+ !ether_addr_equal(hw->mac.perm_addr, hw->mac.addr))
interface->flags |= FM10K_FLAG_RESET_REQUESTED;
/* VLAN override was changed, or default VLAN changed */
@@ -1620,7 +1635,7 @@ void fm10k_up(struct fm10k_intfc *interface)
netif_tx_start_all_queues(interface->netdev);
/* kick off the service timer now */
- hw->mac.get_host_state = 1;
+ hw->mac.get_host_state = true;
mod_timer(&interface->service_timer, jiffies);
}
@@ -2131,16 +2146,22 @@ static int fm10k_resume(struct pci_dev *pdev)
rtnl_lock();
err = fm10k_init_queueing_scheme(interface);
- if (!err) {
- fm10k_mbx_request_irq(interface);
- if (netif_running(netdev))
- err = fm10k_open(netdev);
- }
+ if (err)
+ goto err_queueing_scheme;
- rtnl_unlock();
+ err = fm10k_mbx_request_irq(interface);
+ if (err)
+ goto err_mbx_irq;
+ err = fm10k_hw_ready(interface);
if (err)
- return err;
+ goto err_open;
+
+ err = netif_running(netdev) ? fm10k_open(netdev) : 0;
+ if (err)
+ goto err_open;
+
+ rtnl_unlock();
/* assume host is not ready, to prevent race with watchdog in case we
* actually don't have connection to the switch
@@ -2158,6 +2179,14 @@ static int fm10k_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
return 0;
+err_open:
+ fm10k_mbx_free_irq(interface);
+err_mbx_irq:
+ fm10k_clear_queueing_scheme(interface);
+err_queueing_scheme:
+ rtnl_unlock();
+
+ return err;
}
/**
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 808307e..62ccebc 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1250,7 +1250,7 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
/* block attempts to set MAC for a locked device */
if (is_valid_ether_addr(vf_info->mac) &&
- memcmp(mac, vf_info->mac, ETH_ALEN))
+ !ether_addr_equal(mac, vf_info->mac))
return FM10K_ERR_PARAM;
set = !(vlan & FM10K_VLAN_CLEAR);
@@ -1866,39 +1866,39 @@ static const struct fm10k_msg_data fm10k_msg_data_pf[] = {
FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};
-static struct fm10k_mac_ops mac_ops_pf = {
- .get_bus_info = &fm10k_get_bus_info_generic,
- .reset_hw = &fm10k_reset_hw_pf,
- .init_hw = &fm10k_init_hw_pf,
- .start_hw = &fm10k_start_hw_generic,
- .stop_hw = &fm10k_stop_hw_generic,
- .update_vlan = &fm10k_update_vlan_pf,
- .read_mac_addr = &fm10k_read_mac_addr_pf,
- .update_uc_addr = &fm10k_update_uc_addr_pf,
- .update_mc_addr = &fm10k_update_mc_addr_pf,
- .update_xcast_mode = &fm10k_update_xcast_mode_pf,
- .update_int_moderator = &fm10k_update_int_moderator_pf,
- .update_lport_state = &fm10k_update_lport_state_pf,
- .update_hw_stats = &fm10k_update_hw_stats_pf,
- .rebind_hw_stats = &fm10k_rebind_hw_stats_pf,
- .configure_dglort_map = &fm10k_configure_dglort_map_pf,
- .set_dma_mask = &fm10k_set_dma_mask_pf,
- .get_fault = &fm10k_get_fault_pf,
- .get_host_state = &fm10k_get_host_state_pf,
- .adjust_systime = &fm10k_adjust_systime_pf,
- .read_systime = &fm10k_read_systime_pf,
+static const struct fm10k_mac_ops mac_ops_pf = {
+ .get_bus_info = fm10k_get_bus_info_generic,
+ .reset_hw = fm10k_reset_hw_pf,
+ .init_hw = fm10k_init_hw_pf,
+ .start_hw = fm10k_start_hw_generic,
+ .stop_hw = fm10k_stop_hw_generic,
+ .update_vlan = fm10k_update_vlan_pf,
+ .read_mac_addr = fm10k_read_mac_addr_pf,
+ .update_uc_addr = fm10k_update_uc_addr_pf,
+ .update_mc_addr = fm10k_update_mc_addr_pf,
+ .update_xcast_mode = fm10k_update_xcast_mode_pf,
+ .update_int_moderator = fm10k_update_int_moderator_pf,
+ .update_lport_state = fm10k_update_lport_state_pf,
+ .update_hw_stats = fm10k_update_hw_stats_pf,
+ .rebind_hw_stats = fm10k_rebind_hw_stats_pf,
+ .configure_dglort_map = fm10k_configure_dglort_map_pf,
+ .set_dma_mask = fm10k_set_dma_mask_pf,
+ .get_fault = fm10k_get_fault_pf,
+ .get_host_state = fm10k_get_host_state_pf,
+ .adjust_systime = fm10k_adjust_systime_pf,
+ .read_systime = fm10k_read_systime_pf,
};
-static struct fm10k_iov_ops iov_ops_pf = {
- .assign_resources = &fm10k_iov_assign_resources_pf,
- .configure_tc = &fm10k_iov_configure_tc_pf,
- .assign_int_moderator = &fm10k_iov_assign_int_moderator_pf,
+static const struct fm10k_iov_ops iov_ops_pf = {
+ .assign_resources = fm10k_iov_assign_resources_pf,
+ .configure_tc = fm10k_iov_configure_tc_pf,
+ .assign_int_moderator = fm10k_iov_assign_int_moderator_pf,
.assign_default_mac_vlan = fm10k_iov_assign_default_mac_vlan_pf,
- .reset_resources = &fm10k_iov_reset_resources_pf,
- .set_lport = &fm10k_iov_set_lport_pf,
- .reset_lport = &fm10k_iov_reset_lport_pf,
- .update_stats = &fm10k_iov_update_stats_pf,
- .report_timestamp = &fm10k_iov_report_timestamp_pf,
+ .reset_resources = fm10k_iov_reset_resources_pf,
+ .set_lport = fm10k_iov_set_lport_pf,
+ .reset_lport = fm10k_iov_reset_lport_pf,
+ .update_stats = fm10k_iov_update_stats_pf,
+ .report_timestamp = fm10k_iov_report_timestamp_pf,
};
static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw)
@@ -1908,9 +1908,9 @@ static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw)
return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf);
}
-struct fm10k_info fm10k_pf_info = {
+const struct fm10k_info fm10k_pf_info = {
.mac = fm10k_mac_pf,
- .get_invariants = &fm10k_get_invariants_pf,
+ .get_invariants = fm10k_get_invariants_pf,
.mac_ops = &mac_ops_pf,
.iov_ops = &iov_ops_pf,
};
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
index a8fc512..b2d96b4 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
@@ -74,6 +74,11 @@ enum fm10k_pf_tlv_attr_id_v1 {
#define FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16
#define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16
+/* The following data structures are overlayed directly onto TLV mailbox
+ * messages, and must not break 4 byte alignment. Ensure the structures line
+ * up correctly as per their TLV definition.
+ */
+
struct fm10k_mac_update {
__le32 mac_lower;
__le16 mac_upper;
@@ -81,26 +86,26 @@ struct fm10k_mac_update {
__le16 glort;
u8 flags;
u8 action;
-} __packed;
+} __aligned(4) __packed;
struct fm10k_global_table_data {
__le32 used;
__le32 avail;
-} __packed;
+} __aligned(4) __packed;
struct fm10k_swapi_error {
__le32 status;
struct fm10k_global_table_data mac;
struct fm10k_global_table_data nexthop;
struct fm10k_global_table_data ffu;
-} __packed;
+} __aligned(4) __packed;
struct fm10k_swapi_1588_timestamp {
__le64 egress;
__le64 ingress;
__le16 dglort;
__le16 sglort;
-} __packed;
+} __aligned(4) __packed;
s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[];
@@ -128,5 +133,5 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **,
s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **,
struct fm10k_mbx_info *);
-extern struct fm10k_info fm10k_pf_info;
+extern const struct fm10k_info fm10k_pf_info;
#endif /* _FM10K_PF_H */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index 95afb5c..ab01bb3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -755,7 +755,7 @@ parse_nested:
err = fm10k_tlv_attr_get_mac_vlan(
results[FM10K_TEST_MSG_MAC_ADDR],
result_mac, &result_vlan);
- if (!err && memcmp(test_mac, result_mac, ETH_ALEN))
+ if (!err && !ether_addr_equal(test_mac, result_mac))
err = FM10K_ERR_INVALID_VALUE;
if (!err && test_vlan != result_vlan)
err = FM10K_ERR_INVALID_VALUE;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
index d5ad359..e1845e0 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
@@ -38,9 +38,9 @@ struct fm10k_msg_data;
* mailbox size we will provide a message with the above header and it
* will be segmented and transported to the mailbox to the other side where
* it is reassembled. It contains the following fields:
- * Len: Length of the message in bytes excluding the message header
+ * Length: Length of the message in bytes excluding the message header
* Flags: TBD
- * Rule: These will be the message/argument types we pass
+ * Type/ID: These will be the message/argument types we pass
*/
/* message data header */
#define FM10K_TLV_ID_SHIFT 0
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 098883d..854ebb1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -550,7 +550,6 @@ struct fm10k_mac_ops {
struct fm10k_dglort_cfg *);
void (*set_dma_mask)(struct fm10k_hw *, u64);
s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *);
- void (*request_lport_map)(struct fm10k_hw *);
s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb);
u64 (*read_systime)(struct fm10k_hw *);
};
@@ -660,10 +659,10 @@ enum fm10k_devices {
};
struct fm10k_info {
- enum fm10k_mac_type mac;
- s32 (*get_invariants)(struct fm10k_hw *);
- struct fm10k_mac_ops *mac_ops;
- struct fm10k_iov_ops *iov_ops;
+ enum fm10k_mac_type mac;
+ s32 (*get_invariants)(struct fm10k_hw *);
+ const struct fm10k_mac_ops *mac_ops;
+ const struct fm10k_iov_ops *iov_ops;
};
struct fm10k_hw {
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index 5445c0f..91f8d73 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -298,7 +298,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
/* verify we are not locked down on the MAC address */
if (is_valid_ether_addr(hw->mac.perm_addr) &&
- memcmp(hw->mac.perm_addr, mac, ETH_ALEN))
+ !ether_addr_equal(hw->mac.perm_addr, mac))
return FM10K_ERR_PARAM;
/* add bit to notify us if this is a set or clear operation */
@@ -562,25 +562,25 @@ static const struct fm10k_msg_data fm10k_msg_data_vf[] = {
FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};
-static struct fm10k_mac_ops mac_ops_vf = {
- .get_bus_info = &fm10k_get_bus_info_generic,
- .reset_hw = &fm10k_reset_hw_vf,
- .init_hw = &fm10k_init_hw_vf,
- .start_hw = &fm10k_start_hw_generic,
- .stop_hw = &fm10k_stop_hw_vf,
- .update_vlan = &fm10k_update_vlan_vf,
- .read_mac_addr = &fm10k_read_mac_addr_vf,
- .update_uc_addr = &fm10k_update_uc_addr_vf,
- .update_mc_addr = &fm10k_update_mc_addr_vf,
- .update_xcast_mode = &fm10k_update_xcast_mode_vf,
- .update_int_moderator = &fm10k_update_int_moderator_vf,
- .update_lport_state = &fm10k_update_lport_state_vf,
- .update_hw_stats = &fm10k_update_hw_stats_vf,
- .rebind_hw_stats = &fm10k_rebind_hw_stats_vf,
- .configure_dglort_map = &fm10k_configure_dglort_map_vf,
- .get_host_state = &fm10k_get_host_state_generic,
- .adjust_systime = &fm10k_adjust_systime_vf,
- .read_systime = &fm10k_read_systime_vf,
+static const struct fm10k_mac_ops mac_ops_vf = {
+ .get_bus_info = fm10k_get_bus_info_generic,
+ .reset_hw = fm10k_reset_hw_vf,
+ .init_hw = fm10k_init_hw_vf,
+ .start_hw = fm10k_start_hw_generic,
+ .stop_hw = fm10k_stop_hw_vf,
+ .update_vlan = fm10k_update_vlan_vf,
+ .read_mac_addr = fm10k_read_mac_addr_vf,
+ .update_uc_addr = fm10k_update_uc_addr_vf,
+ .update_mc_addr = fm10k_update_mc_addr_vf,
+ .update_xcast_mode = fm10k_update_xcast_mode_vf,
+ .update_int_moderator = fm10k_update_int_moderator_vf,
+ .update_lport_state = fm10k_update_lport_state_vf,
+ .update_hw_stats = fm10k_update_hw_stats_vf,
+ .rebind_hw_stats = fm10k_rebind_hw_stats_vf,
+ .configure_dglort_map = fm10k_configure_dglort_map_vf,
+ .get_host_state = fm10k_get_host_state_generic,
+ .adjust_systime = fm10k_adjust_systime_vf,
+ .read_systime = fm10k_read_systime_vf,
};
static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw)
@@ -590,8 +590,8 @@ static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw)
return fm10k_pfvf_mbx_init(hw, &hw->mbx, fm10k_msg_data_vf, 0);
}
-struct fm10k_info fm10k_vf_info = {
+const struct fm10k_info fm10k_vf_info = {
.mac = fm10k_mac_vf,
- .get_invariants = &fm10k_get_invariants_vf,
+ .get_invariants = fm10k_get_invariants_vf,
.mac_ops = &mac_ops_vf,
};
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
index 06a99d7..c4439f1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
@@ -74,5 +74,5 @@ extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[];
#define FM10K_VF_MSG_1588_HANDLER(func) \
FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func)
-extern struct fm10k_info fm10k_vf_info;
+extern const struct fm10k_info fm10k_vf_info;
#endif /* _FM10K_VF_H */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index f4c9a42..4b9156c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -317,7 +317,7 @@ enum ixgbe_ring_f_enum {
};
#define IXGBE_MAX_RSS_INDICES 16
-#define IXGBE_MAX_RSS_INDICES_X550 64
+#define IXGBE_MAX_RSS_INDICES_X550 63
#define IXGBE_MAX_VMDQ_INDICES 64
#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
#define IXGBE_MAX_FCOE_INDICES 8
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 1ed4c9a..2448eba 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -151,6 +151,34 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
};
#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
+/* currently supported speeds for 10G */
+#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
+ SUPPORTED_10000baseKX4_Full | \
+ SUPPORTED_10000baseKR_Full)
+
+#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)
+
+static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
+{
+ if (!ixgbe_isbackplane(hw->phy.media_type))
+ return SUPPORTED_10000baseT_Full;
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598:
+ case IXGBE_DEV_ID_82599_KX4:
+ case IXGBE_DEV_ID_82599_KX4_MEZZ:
+ case IXGBE_DEV_ID_X550EM_X_KX4:
+ return SUPPORTED_10000baseKX4_Full;
+ case IXGBE_DEV_ID_82598_BX:
+ case IXGBE_DEV_ID_82599_KR:
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ return SUPPORTED_10000baseKR_Full;
+ default:
+ return SUPPORTED_10000baseKX4_Full |
+ SUPPORTED_10000baseKR_Full;
+ }
+}
+
static int ixgbe_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
@@ -165,29 +193,30 @@ static int ixgbe_get_settings(struct net_device *netdev,
/* set the supported link speeds */
if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->supported |= SUPPORTED_10000baseT_Full;
+ ecmd->supported |= ixgbe_get_supported_10gtypes(hw);
if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
ecmd->supported |= SUPPORTED_1000baseT_Full;
if (supported_link & IXGBE_LINK_SPEED_100_FULL)
- ecmd->supported |= SUPPORTED_100baseT_Full;
+ ecmd->supported |= ixgbe_isbackplane(hw->phy.media_type) ?
+ SUPPORTED_1000baseKX_Full :
+ SUPPORTED_1000baseT_Full;
+ /* default advertised speed if phy.autoneg_advertised isn't set */
+ ecmd->advertising = ecmd->supported;
/* set the advertised speeds */
if (hw->phy.autoneg_advertised) {
+ ecmd->advertising = 0;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
ecmd->advertising |= ADVERTISED_100baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->advertising |= ADVERTISED_10000baseT_Full;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ ecmd->advertising |= ecmd->supported & ADVRTSD_MSK_10G;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
+ if (ecmd->supported & SUPPORTED_1000baseKX_Full)
+ ecmd->advertising |= ADVERTISED_1000baseKX_Full;
+ else
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ }
} else {
- /* default modes in case phy.autoneg_advertised isn't set */
- if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->advertising |= ADVERTISED_10000baseT_Full;
- if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
- if (supported_link & IXGBE_LINK_SPEED_100_FULL)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
-
if (hw->phy.multispeed_fiber && !autoneg) {
if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
ecmd->advertising = ADVERTISED_10000baseT_Full;
@@ -225,6 +254,10 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_phy_sfp_avago:
case ixgbe_phy_sfp_intel:
case ixgbe_phy_sfp_unknown:
+ case ixgbe_phy_qsfp_passive_unknown:
+ case ixgbe_phy_qsfp_active_unknown:
+ case ixgbe_phy_qsfp_intel:
+ case ixgbe_phy_qsfp_unknown:
/* SFP+ devices, further checking needed */
switch (adapter->hw.phy.sfp_type) {
case ixgbe_sfp_type_da_cu:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c5c0fb4..ea9537d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -9015,8 +9015,7 @@ skip_sriov:
netdev->vlan_features |= NETIF_F_IPV6_CSUM;
netdev->vlan_features |= NETIF_F_SG;
- netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM;
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
@@ -9025,9 +9024,7 @@ skip_sriov:
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
- netdev->hw_enc_features |= NETIF_F_RXCSUM |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM;
+ netdev->hw_enc_features |= NETIF_F_RXCSUM;
break;
default:
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index eeff3d0..8025a3f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -593,11 +593,11 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
/* post increment loop, covers VLVF_ENTRIES - 1 to 0 */
for (i = IXGBE_VLVF_ENTRIES; i--;) {
- u32 word = IXGBE_VLVFB(i * 2 + vf / 32);
u32 bits[2], vlvfb, vid, vfta, vlvf;
- u32 mask = 1 << (vf / 32);
+ u32 word = i * 2 + vf / 32;
+ u32 mask = 1 << (vf % 32);
- vlvfb = IXGBE_READ_REG(hw, word);
+ vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
/* if our bit isn't set we can skip it */
if (!(vlvfb & mask))
@@ -608,7 +608,7 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
/* create 64b mask to check whether we should clear VLVF */
bits[word % 2] = vlvfb;
- bits[(word % 2) ^ 1] = IXGBE_READ_REG(hw, word ^ 1);
+ bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1));
/* if promisc is enabled, PF will be present, leave VFTA */
if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) {
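
The indexing fix above hinges on how a pool's 64 VF-enable bits are split across paired 32-bit VLVFB words: pool i owns words i * 2 and i * 2 + 1, the VF selects its word with vf / 32 and its bit with vf % 32 (the old code used vf / 32 for both), and word ^ 1 addresses the sibling word when assembling the full 64-bit mask. A standalone sketch of the arithmetic, indices only, no hardware access:

    /* Sketch of the VLVFB word/bit arithmetic used above; register
     * names and layout follow the hunk, values are illustrative.
     */
    #include <assert.h>
    #include <stdint.h>

    static void vlvfb_index(unsigned int pool, unsigned int vf,
                            uint32_t *word, uint32_t *mask)
    {
            *word = pool * 2 + vf / 32;     /* which 32-bit half holds this VF */
            *mask = 1u << (vf % 32);        /* the VF's bit inside that word */
    }

    int main(void)
    {
            uint32_t word, mask;

            vlvfb_index(3, 37, &word, &mask);
            assert(word == 7);              /* 3 * 2 + 37 / 32 */
            assert(mask == 1u << 5);        /* 37 % 32 */
            assert((word ^ 1) == 6);        /* the pool's sibling word */
            return 0;
    }
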
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 06add27..5f53cc6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -3520,7 +3520,7 @@ struct ixgbe_info {
#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
#define IXGBE_FUSES0_300MHZ BIT(5)
-#define IXGBE_FUSES0_REV1 BIT(6)
+#define IXGBE_FUSES0_REV_MASK (3 << 6)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index f4ef0d1..87aca3f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1857,10 +1857,6 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
u32 save_autoneg;
bool link_up;
- /* SW LPLU not required on later HW revisions. */
- if (IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))
- return 0;
-
/* If blocked by MNG FW, then don't restart AN */
if (ixgbe_check_reset_blocked(hw))
return 0;
@@ -2000,8 +1996,9 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
ixgbe_setup_internal_phy_t_x550em;
/* setup SW LPLU only for first revision */
- if (!(IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw,
- IXGBE_FUSES0_GROUP(0))))
+ if (hw->mac.type == ixgbe_mac_X550EM_x &&
+ !(IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)) &
+ IXGBE_FUSES0_REV_MASK))
phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
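
The LPLU gating above replaces a single-bit revision check with a two-bit field test: any non-zero value in bits 7:6 of FUSES0 marks a later hardware revision, so the condition must mask both bits rather than test BIT(6) alone. A standalone sketch of why the wider mask matters:

    /* Sketch: testing a 2-bit revision field instead of a single bit.
     * Field position follows the hunk; values are illustrative.
     */
    #include <assert.h>
    #include <stdint.h>

    #define REV_SHIFT 6
    #define REV_MASK  (3u << REV_SHIFT)     /* bits 7:6 */

    static int is_first_revision(uint32_t fuses)
    {
            return (fuses & REV_MASK) == 0; /* first rev: both bits clear */
    }

    int main(void)
    {
            assert(is_first_revision(0));
            /* bit 7 set, bit 6 clear: a BIT(6)-only test would miss this */
            assert(!is_first_revision(2u << REV_SHIFT));
            return 0;
    }
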
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index f098952..3558f01 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1016,6 +1016,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
ixgbevf_for_each_ring(ring, q_vector->tx)
clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
+ if (budget <= 0)
+ return budget;
#ifdef CONFIG_NET_RX_BUSY_POLL
if (!ixgbevf_qv_lock_napi(q_vector))
return budget;
@@ -1045,7 +1047,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
return budget;
/* all work done, exit the polling mode */
napi_complete_done(napi, work_done);
- if (adapter->rx_itr_setting & 1)
+ if (adapter->rx_itr_setting == 1)
ixgbevf_set_itr(q_vector);
if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
!test_bit(__IXGBEVF_REMOVING, &adapter->state))
@@ -1248,9 +1250,10 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
new_itr = IXGBE_20K_ITR;
break;
case bulk_latency:
- default:
new_itr = IXGBE_12K_ITR;
break;
+ default:
+ break;
}
if (new_itr != q_vector->itr) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index af8a48b..22379eb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -855,7 +855,6 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
err_debugfs_init:
mlxsw_core->driver->fini(mlxsw_core->driver_priv);
err_driver_init:
- mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
@@ -878,7 +877,6 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
mlxsw_core_debugfs_fini(mlxsw_core);
mlxsw_core->driver->fini(mlxsw_core->driver_priv);
- mlxsw_hwmon_fini(mlxsw_core->hwmon);
mlxsw_emad_fini(mlxsw_core);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
kfree(mlxsw_core->lag.mapping);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 4833fb3..a017236 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -239,10 +239,6 @@ static inline int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
return 0;
}
-static inline void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
-{
-}
-
#endif
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index b86db96..5b9364f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -49,7 +49,7 @@ struct mlxsw_hwmon_attr {
struct device_attribute dev_attr;
struct mlxsw_hwmon *hwmon;
unsigned int type_index;
- char name[16];
+ char name[32];
};
struct mlxsw_hwmon {
@@ -107,6 +107,32 @@ static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev,
return sprintf(buf, "%u\n", temp_max);
}
+static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+ if (val != 1)
+ return -EINVAL;
+
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_attr->type_index, true, true);
+ err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err) {
+ dev_err(mlxsw_hwmon->bus_info->dev, "Failed to reset temp sensor history\n");
+ return err;
+ }
+ return len;
+}
+
static ssize_t mlxsw_hwmon_fan_rpm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -175,6 +201,7 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev,
enum mlxsw_hwmon_attr_type {
MLXSW_HWMON_ATTR_TYPE_TEMP,
MLXSW_HWMON_ATTR_TYPE_TEMP_MAX,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_RST,
MLXSW_HWMON_ATTR_TYPE_FAN_RPM,
MLXSW_HWMON_ATTR_TYPE_PWM,
};
@@ -201,6 +228,12 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
"temp%u_highest", num + 1);
break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_RST:
+ mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_temp_rst_store;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = S_IWUSR;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_reset_history", num + 1);
+ break;
case MLXSW_HWMON_ATTR_TYPE_FAN_RPM:
mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_fan_rpm_show;
mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO;
@@ -254,6 +287,8 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
MLXSW_HWMON_ATTR_TYPE_TEMP, i, i);
mlxsw_hwmon_attr_add(mlxsw_hwmon,
MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, i, i);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_RST, i, i);
}
return 0;
}
@@ -299,7 +334,8 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
struct device *hwmon_dev;
int err;
- mlxsw_hwmon = kzalloc(sizeof(*mlxsw_hwmon), GFP_KERNEL);
+ mlxsw_hwmon = devm_kzalloc(mlxsw_bus_info->dev, sizeof(*mlxsw_hwmon),
+ GFP_KERNEL);
if (!mlxsw_hwmon)
return -ENOMEM;
mlxsw_hwmon->core = mlxsw_core;
@@ -332,11 +368,5 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
err_hwmon_register:
err_fans_init:
err_temp_init:
- kfree(mlxsw_hwmon);
return err;
}
-
-void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
-{
- kfree(mlxsw_hwmon);
-}
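
All of the mlxsw removals above follow from one change: the hwmon state is now allocated with devm_kzalloc(), tying its lifetime to the bus device, so the explicit mlxsw_hwmon_fini()/kfree() pair and its error-path call become dead code. A minimal sketch of the device-managed pattern (generic driver-model code that builds only in-tree, not mlxsw's actual functions):

    /* Sketch: device-managed allocation replaces a manual fini/kfree
     * pair. The memory is released automatically when the device is
     * unbound, so no fini callback is needed.
     */
    #include <linux/device.h>
    #include <linux/slab.h>

    struct my_state {
            int dummy;
    };

    static int my_init(struct device *dev)
    {
            struct my_state *st;

            st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
            if (!st)
                    return -ENOMEM;
            dev_set_drvdata(dev, st);
            return 0;       /* no matching kfree() on any unwind path */
    }

From userspace, the new write-only attribute is driven by writing 1 to the file, e.g. echo 1 > /sys/class/hwmon/hwmon0/temp1_reset_history (path illustrative); any other value is rejected with -EINVAL.
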
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index a72bcdd..4b76c69 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -167,7 +167,7 @@ struct qlcnic_dcb_cfg {
u32 version;
};
-static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
+static const struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
.init_dcbnl_ops = __qlcnic_init_dcbnl_ops,
.free = __qlcnic_dcb_free,
.attach = __qlcnic_dcb_attach,
@@ -180,7 +180,7 @@ static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
.aen_handler = qlcnic_83xx_dcb_aen_handler,
};
-static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
+static const struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
.init_dcbnl_ops = __qlcnic_init_dcbnl_ops,
.free = __qlcnic_dcb_free,
.attach = __qlcnic_dcb_attach,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index 3cf4a10..9777e57 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -37,7 +37,7 @@ struct qlcnic_dcb {
struct qlcnic_adapter *adapter;
struct delayed_work aen_work;
struct workqueue_struct *wq;
- struct qlcnic_dcb_ops *ops;
+ const struct qlcnic_dcb_ops *ops;
struct qlcnic_dcb_cfg *cfg;
unsigned long state;
};
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 79ef799..58365bc 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -3947,7 +3947,7 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0);
if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) ||
- (ioffset_p1 != 0x0f) || (ioffset_p0 == 0x0f)) {
+ (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f)) {
rtl_writephy(tp, 0x1f, 0x0bcf);
rtl_writephy(tp, 0x16, data);
rtl_writephy(tp, 0x1f, 0x0000);
@@ -6136,7 +6136,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_pcie_state_l2l3_enable(tp, false);
rtl_writephy(tp, 0x1f, 0x0c42);
- rg_saw_cnt = rtl_readphy(tp, 0x13);
+ rg_saw_cnt = (rtl_readphy(tp, 0x13) & 0x3fff);
rtl_writephy(tp, 0x1f, 0x0000);
if (rg_saw_cnt > 0) {
u16 sw_cnt_1ms_ini;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 230fabd..98d33d4 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -486,10 +486,17 @@ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);
for (i = 0; i < n; i++) {
- rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc) {
+ /* Don't display the MC error if we didn't have space
+ * for a VF.
+ */
+ if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC))
+ efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF,
+ 0, outbuf, outlen, rc);
break;
+ }
if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
rc = -EIO;
break;
@@ -3833,13 +3840,12 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
table->entry[filter_idx].handle);
- rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
if (rc)
- netdev_WARN(efx->net_dev,
- "filter_idx=%#x handle=%#llx\n",
- filter_idx,
- table->entry[filter_idx].handle);
+ netif_info(efx, drv, efx->net_dev,
+ "%s: filter %04x remove failed\n",
+ __func__, filter_idx);
kfree(spec);
}
@@ -3848,11 +3854,14 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
}
#define EFX_EF10_FILTER_DO_MARK_OLD(id) \
- if (id != EFX_EF10_FILTER_ID_INVALID) { \
- filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \
- WARN_ON(!table->entry[filter_idx].spec); \
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; \
- }
+ if (id != EFX_EF10_FILTER_ID_INVALID) { \
+ filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \
+ if (!table->entry[filter_idx].spec) \
+ netif_dbg(efx, drv, efx->net_dev, \
+ "%s: marked null spec old %04x:%04x\n", \
+ __func__, id, filter_idx); \
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;\
+ }
static void efx_ef10_filter_mark_old(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -4026,9 +4035,10 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
- netif_warn(efx, drv, efx->net_dev,
- "%scast mismatch filter insert failed rc=%d\n",
- multicast ? "Multi" : "Uni", rc);
+ netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING,
+ efx->net_dev,
+ "%scast mismatch filter insert failed rc=%d\n",
+ multicast ? "Multi" : "Uni", rc);
} else if (multicast) {
table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc);
if (!nic_data->workaround_26807) {
@@ -4070,19 +4080,31 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
static void efx_ef10_filter_remove_old(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
- bool remove_failed = false;
+ int remove_failed = 0;
+ int remove_noent = 0;
+ int rc;
int i;
for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
if (ACCESS_ONCE(table->entry[i].spec) &
EFX_EF10_FILTER_FLAG_AUTO_OLD) {
- if (efx_ef10_filter_remove_internal(
- efx, 1U << EFX_FILTER_PRI_AUTO,
- i, true) < 0)
- remove_failed = true;
+ rc = efx_ef10_filter_remove_internal(efx,
+ 1U << EFX_FILTER_PRI_AUTO, i, true);
+ if (rc == -ENOENT)
+ remove_noent++;
+ else if (rc)
+ remove_failed++;
}
}
- WARN_ON(remove_failed);
+
+ if (remove_failed)
+ netif_info(efx, drv, efx->net_dev,
+ "%s: failed to remove %d filters\n",
+ __func__, remove_failed);
+ if (remove_noent)
+ netif_info(efx, drv, efx->net_dev,
+ "%s: failed to remove %d non-existent filters\n",
+ __func__, remove_noent);
}
static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 6f69743..0705ec86 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -3174,14 +3174,15 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
rtnl_lock();
rc = efx_mtd_probe(efx);
rtnl_unlock();
- if (rc)
+ if (rc && rc != -EPERM)
netif_warn(efx, probe, efx->net_dev,
"failed to create MTDs (%d)\n", rc);
rc = pci_enable_pcie_error_reporting(pci_dev);
if (rc && rc != -EINVAL)
- netif_warn(efx, probe, efx->net_dev,
- "pci_enable_pcie_error_reporting failed (%d)\n", rc);
+ netif_notice(efx, probe, efx->net_dev,
+ "PCIE error reporting unavailable (%d).\n",
+ rc);
return 0;
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 41fb6b6..d28e7dd 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -82,6 +82,7 @@ int efx_mcdi_init(struct efx_nic *efx)
mcdi->logging_enabled = mcdi_logging_default;
#endif
init_waitqueue_head(&mcdi->wq);
+ init_waitqueue_head(&mcdi->proxy_rx_wq);
spin_lock_init(&mcdi->iface_lock);
mcdi->state = MCDI_STATE_QUIESCENT;
mcdi->mode = MCDI_MODE_POLL;
@@ -315,6 +316,7 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
}
#endif
+ mcdi->resprc_raw = 0;
if (error && mcdi->resp_data_len == 0) {
netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
mcdi->resprc = -EIO;
@@ -325,8 +327,8 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
mcdi->resprc = -EIO;
} else if (error) {
efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
- mcdi->resprc =
- efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
+ mcdi->resprc_raw = EFX_DWORD_FIELD(hdr, EFX_DWORD_0);
+ mcdi->resprc = efx_mcdi_errno(mcdi->resprc_raw);
} else {
mcdi->resprc = 0;
}
@@ -621,9 +623,30 @@ efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
return 0;
}
-static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+static bool efx_mcdi_get_proxy_handle(struct efx_nic *efx,
+ size_t hdr_len, size_t data_len,
+ u32 *proxy_handle)
+{
+ MCDI_DECLARE_BUF_ERR(testbuf);
+ const size_t buflen = sizeof(testbuf);
+
+ if (!proxy_handle || data_len < buflen)
+ return false;
+
+ efx->type->mcdi_read_response(efx, testbuf, hdr_len, buflen);
+ if (MCDI_DWORD(testbuf, ERR_CODE) == MC_CMD_ERR_PROXY_PENDING) {
+ *proxy_handle = MCDI_DWORD(testbuf, ERR_PROXY_PENDING_HANDLE);
+ return true;
+ }
+
+ return false;
+}
+
+static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
+ size_t inlen,
efx_dword_t *outbuf, size_t outlen,
- size_t *outlen_actual, bool quiet)
+ size_t *outlen_actual, bool quiet,
+ u32 *proxy_handle, int *raw_rc)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
MCDI_DECLARE_BUF_ERR(errbuf);
@@ -657,6 +680,9 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
spin_unlock_bh(&mcdi->iface_lock);
}
+ if (proxy_handle)
+ *proxy_handle = 0;
+
if (rc != 0) {
if (outlen_actual)
*outlen_actual = 0;
@@ -669,6 +695,8 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
* acquiring the iface_lock. */
spin_lock_bh(&mcdi->iface_lock);
rc = mcdi->resprc;
+ if (raw_rc)
+ *raw_rc = mcdi->resprc_raw;
hdr_len = mcdi->resp_hdr_len;
data_len = mcdi->resp_data_len;
err_len = min(sizeof(errbuf), data_len);
@@ -689,6 +717,12 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
-rc);
efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ } else if (proxy_handle && (rc == -EPROTO) &&
+ efx_mcdi_get_proxy_handle(efx, hdr_len, data_len,
+ proxy_handle)) {
+ mcdi->proxy_rx_status = 0;
+ mcdi->proxy_rx_handle = 0;
+ mcdi->state = MCDI_STATE_PROXY_WAIT;
} else if (rc && !quiet) {
efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len,
rc);
@@ -701,34 +735,195 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
}
}
- efx_mcdi_release(mcdi);
+ if (!proxy_handle || !*proxy_handle)
+ efx_mcdi_release(mcdi);
return rc;
}
-static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
+static void efx_mcdi_proxy_abort(struct efx_mcdi_iface *mcdi)
+{
+ if (mcdi->state == MCDI_STATE_PROXY_WAIT) {
+ /* Interrupt the proxy wait. */
+ mcdi->proxy_rx_status = -EINTR;
+ wake_up(&mcdi->proxy_rx_wq);
+ }
+}
+
+static void efx_mcdi_ev_proxy_response(struct efx_nic *efx,
+ u32 handle, int status)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ WARN_ON(mcdi->state != MCDI_STATE_PROXY_WAIT);
+
+ mcdi->proxy_rx_status = efx_mcdi_errno(status);
+ /* Ensure the status is written before we update the handle, since the
+ * latter is used to check if we've finished.
+ */
+ wmb();
+ mcdi->proxy_rx_handle = handle;
+ wake_up(&mcdi->proxy_rx_wq);
+}
+
+static int efx_mcdi_proxy_wait(struct efx_nic *efx, u32 handle, bool quiet)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ int rc;
+
+ /* Wait for a proxy event, or timeout. */
+ rc = wait_event_timeout(mcdi->proxy_rx_wq,
+ mcdi->proxy_rx_handle != 0 ||
+ mcdi->proxy_rx_status == -EINTR,
+ MCDI_RPC_TIMEOUT);
+
+ if (rc <= 0) {
+ netif_dbg(efx, hw, efx->net_dev,
+ "MCDI proxy timeout %d\n", handle);
+ return -ETIMEDOUT;
+ } else if (mcdi->proxy_rx_handle != handle) {
+ netif_warn(efx, hw, efx->net_dev,
+ "MCDI proxy unexpected handle %d (expected %d)\n",
+ mcdi->proxy_rx_handle, handle);
+ return -EINVAL;
+ }
+
+ return mcdi->proxy_rx_status;
+}
+
+static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
const efx_dword_t *inbuf, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
- size_t *outlen_actual, bool quiet)
+ size_t *outlen_actual, bool quiet, int *raw_rc)
{
+ u32 proxy_handle = 0; /* Zero is an invalid proxy handle. */
int rc;
+ if (inbuf && inlen && (inbuf == outbuf)) {
+ /* The input buffer can't be aliased with the output. */
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
- if (rc) {
- if (outlen_actual)
- *outlen_actual = 0;
+ if (rc)
return rc;
+
+ rc = _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, quiet, &proxy_handle, raw_rc);
+
+ if (proxy_handle) {
+ /* Handle proxy authorisation. This allows approval of MCDI
+ * operations to be delegated to the admin function, allowing
+ * fine control over (e.g.) multicast subscriptions.
+ */
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "MCDI waiting for proxy auth %d\n",
+ proxy_handle);
+ rc = efx_mcdi_proxy_wait(efx, proxy_handle, quiet);
+
+ if (rc == 0) {
+ netif_dbg(efx, hw, efx->net_dev,
+ "MCDI proxy retry %d\n", proxy_handle);
+
+ /* We now retry the original request. */
+ mcdi->state = MCDI_STATE_RUNNING_SYNC;
+ efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+
+ rc = _efx_mcdi_rpc_finish(efx, cmd, inlen,
+ outbuf, outlen, outlen_actual,
+ quiet, NULL, raw_rc);
+ } else {
+ netif_printk(efx, hw,
+ rc == -EPERM ? KERN_DEBUG : KERN_ERR,
+ efx->net_dev,
+ "MC command 0x%x failed after proxy auth rc=%d\n",
+ cmd, rc);
+
+ if (rc == -EINTR || rc == -EIO)
+ efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ efx_mcdi_release(mcdi);
+ }
}
- return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
- outlen_actual, quiet);
+
+ return rc;
}
+static int _efx_mcdi_rpc_evb_retry(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual, bool quiet)
+{
+ int raw_rc = 0;
+ int rc;
+
+ rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
+ outbuf, outlen, outlen_actual, true, &raw_rc);
+
+ if ((rc == -EPROTO) && (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
+ efx->type->is_vf) {
+ /* If the EVB port isn't available within a VF this may
+ * mean the PF is still bringing the switch up. We should
+ * retry our request shortly.
+ */
+ unsigned long abort_time = jiffies + MCDI_RPC_TIMEOUT;
+ unsigned int delay_us = 10000;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "%s: NO_EVB_PORT; will retry request\n",
+ __func__);
+
+ do {
+ usleep_range(delay_us, delay_us + 10000);
+ rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
+ outbuf, outlen, outlen_actual,
+ true, &raw_rc);
+ if (delay_us < 100000)
+ delay_us <<= 1;
+ } while ((rc == -EPROTO) &&
+ (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
+ time_before(jiffies, abort_time));
+ }
+
+ if (rc && !quiet && !(cmd == MC_CMD_REBOOT && rc == -EIO))
+ efx_mcdi_display_error(efx, cmd, inlen,
+ outbuf, outlen, rc);
+
+ return rc;
+}
+
+/**
+ * efx_mcdi_rpc - Issue an MCDI command and wait for completion
+ * @efx: NIC through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes. Must be a multiple
+ * of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1.
+ * @outbuf: Response buffer. May be %NULL if @outlen is 0.
+ * @outlen: Length of response buffer, in bytes. If the actual
+ * response is longer than @outlen & ~3, it will be truncated
+ * to that length.
+ * @outlen_actual: Pointer through which to return the actual response
+ * length. May be %NULL if this is not needed.
+ *
+ * This function may sleep and therefore must be called in an appropriate
+ * context.
+ *
+ * Return: A negative error code, or zero if successful. The error
+ * code may come from the MCDI response or may indicate a failure
+ * to communicate with the MC. In the former case, the response
+ * will still be copied to @outbuf and *@outlen_actual will be
+ * set accordingly. In the latter case, *@outlen_actual will be
+ * set to zero.
+ */
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
{
- return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
- outlen_actual, false);
+ return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, false);
}
/* Normally, on receiving an error code in the MCDI response,
@@ -744,8 +939,8 @@ int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
{
- return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
- outlen_actual, true);
+ return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, true);
}
int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
@@ -866,7 +1061,7 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
size_t *outlen_actual)
{
return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
- outlen_actual, false);
+ outlen_actual, false, NULL, NULL);
}
int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
@@ -874,7 +1069,7 @@ int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
size_t *outlen_actual)
{
return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
- outlen_actual, true);
+ outlen_actual, true, NULL, NULL);
}
void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
@@ -887,9 +1082,10 @@ void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
code = MCDI_DWORD(outbuf, ERR_CODE);
if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
err_arg = MCDI_DWORD(outbuf, ERR_ARG);
- netif_err(efx, hw, efx->net_dev,
- "MC command 0x%x inlen %d failed rc=%d (raw=%d) arg=%d\n",
- cmd, (int)inlen, rc, code, err_arg);
+ netif_printk(efx, hw, rc == -EPERM ? KERN_DEBUG : KERN_ERR,
+ efx->net_dev,
+ "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n",
+ cmd, inlen, rc, code, err_arg);
}
/* Switch to polled MCDI completions. This can be called in various
@@ -1014,8 +1210,13 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
* receiving a REBOOT event after posting the MCDI
* request. Did the mc reboot before or after the copyout? The
* best we can do always is just return failure.
+ *
+ * If there is an outstanding proxy response expected it is not going
+ * to arrive. We should thus abort it.
*/
spin_lock(&mcdi->iface_lock);
+ efx_mcdi_proxy_abort(mcdi);
+
if (efx_mcdi_complete_sync(mcdi)) {
if (mcdi->mode == MCDI_MODE_EVENTS) {
mcdi->resprc = rc;
@@ -1063,6 +1264,8 @@ static void efx_mcdi_ev_bist(struct efx_nic *efx)
spin_lock(&mcdi->iface_lock);
efx->mc_bist_for_other_fn = true;
+ efx_mcdi_proxy_abort(mcdi);
+
if (efx_mcdi_complete_sync(mcdi)) {
if (mcdi->mode == MCDI_MODE_EVENTS) {
mcdi->resprc = -EIO;
@@ -1171,6 +1374,11 @@ void efx_mcdi_process_event(struct efx_channel *channel,
EFX_QWORD_VAL(*event));
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
break;
+ case MCDI_EVENT_CODE_PROXY_RESPONSE:
+ efx_mcdi_ev_proxy_response(efx,
+ MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_HANDLE),
+ MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC));
+ break;
default:
netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
code);
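
Taken together, the MCDI changes above implement a small state machine: a command that fails with MC_CMD_ERR_PROXY_PENDING parks the interface in MCDI_STATE_PROXY_WAIT, a PROXY_RESPONSE event (or a reboot/BIST abort) wakes the waiter, and on a successful authorisation the original request is simply re-issued. A condensed sketch of the wait-and-retry core; the helpers below are illustrative stand-ins, not the driver's real functions:

    /* Condensed sketch of the proxy-authorisation flow above. */
    #include <stdint.h>

    struct nic;                             /* opaque device handle */

    /* Issue @cmd; on a proxy-pending response store the handle in
     * *handle and return an error, otherwise leave *handle zero.
     * (Stub for illustration.)
     */
    extern int run_command(struct nic *nic, unsigned int cmd,
                           uint32_t *handle);

    /* Sleep until the admin function answers for @handle, or time out. */
    extern int wait_for_proxy_event(struct nic *nic, uint32_t handle);

    static int rpc_with_proxy(struct nic *nic, unsigned int cmd)
    {
            uint32_t proxy_handle = 0;      /* zero is an invalid handle */
            int rc;

            rc = run_command(nic, cmd, &proxy_handle);
            if (!proxy_handle)
                    return rc;              /* completed or failed directly */

            rc = wait_for_proxy_event(nic, proxy_handle);
            if (rc)
                    return rc;              /* denied, timed out, or aborted */

            /* Authorised: retry the original request exactly once. */
            return run_command(nic, cmd, NULL);
    }
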
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 025d504..c9aeb07 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -17,6 +17,8 @@
* @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending.
* Only the thread that moved into this state is allowed to move out of it.
* @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending.
+ * @MCDI_STATE_PROXY_WAIT: An MCDI request has completed with a response that
+ * indicates we must wait for a proxy authorisation event and then retry.
* @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
* has not yet consumed the result. For all other threads, equivalent to
* %MCDI_STATE_RUNNING.
@@ -25,6 +27,7 @@ enum efx_mcdi_state {
MCDI_STATE_QUIESCENT,
MCDI_STATE_RUNNING_SYNC,
MCDI_STATE_RUNNING_ASYNC,
+ MCDI_STATE_PROXY_WAIT,
MCDI_STATE_COMPLETED,
};
@@ -60,6 +63,9 @@ enum efx_mcdi_mode {
* @async_timer: Timer for asynchronous request timeout
* @logging_buffer: buffer that may be used to build MCDI tracing messages
* @logging_enabled: whether to trace MCDI
+ * @proxy_rx_handle: Most recently received proxy authorisation handle
+ * @proxy_rx_status: Status of most recent proxy authorisation
+ * @proxy_rx_wq: Wait queue for updates to proxy_rx_handle
*/
struct efx_mcdi_iface {
struct efx_nic *efx;
@@ -71,6 +77,7 @@ struct efx_mcdi_iface {
unsigned int credits;
unsigned int seqno;
int resprc;
+ int resprc_raw;
size_t resp_hdr_len;
size_t resp_data_len;
spinlock_t async_lock;
@@ -80,6 +87,9 @@ struct efx_mcdi_iface {
char *logging_buffer;
bool logging_enabled;
#endif
+ unsigned int proxy_rx_handle;
+ int proxy_rx_status;
+ wait_queue_head_t proxy_rx_wq;
};
struct efx_mcdi_mon {
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 92d08eb..c61d66d 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -582,6 +582,7 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
unsigned int buf_len, dma_sz = sizeof(*ndesc);
void *buf_ptr;
u32 pad[2];
+ u32 tmp;
get_words(&dma_desc, 1, &desc->next_desc);
@@ -591,6 +592,7 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
break;
}
+ get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
get_pad_ptr(&buf_ptr, ndesc);
dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(buf_ptr);
@@ -637,6 +639,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
dma_addr_t dma_desc, dma_buff;
struct netcp_packet p_info;
struct sk_buff *skb;
+ u32 pad[2];
void *org_buf_ptr;
dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
@@ -650,7 +653,8 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
}
get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
- get_pad_ptr(&org_buf_ptr, desc);
+ get_pad_info(&pad[0], &pad[1], &org_buf_len, desc);
+ org_buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32));
if (unlikely(!org_buf_ptr)) {
dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
@@ -684,7 +688,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
}
get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
- get_pad_ptr(ptr, ndesc);
+ get_pad_ptr(&ptr, ndesc);
page = ptr;
if (likely(dma_buff && buf_len && page)) {
@@ -773,7 +777,7 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
}
get_org_pkt_info(&dma, &buf_len, desc);
- get_pad_ptr(buf_ptr, desc);
+ get_pad_ptr(&buf_ptr, desc);
if (unlikely(!dma)) {
dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
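
The netcp receive fix above reconstructs a host pointer that was stashed in the descriptor's two 32-bit pad words: pad[0] holds the low half, pad[1] the high half, and the pointer is rebuilt as pad[0] + ((u64)pad[1] << 32), which stays correct on 32-bit hosts where pad[1] is simply zero. A standalone sketch of the round trip:

    /* Sketch of stashing a host pointer in two 32-bit descriptor
     * words and recovering it, as in the receive path above.
     */
    #include <assert.h>
    #include <stdint.h>

    static void stash(void *ptr, uint32_t pad[2])
    {
            uintptr_t v = (uintptr_t)ptr;

            pad[0] = (uint32_t)v;                   /* low 32 bits */
            pad[1] = (uint32_t)((uint64_t)v >> 32); /* high bits, 0 on 32-bit */
    }

    static void *recover(const uint32_t pad[2])
    {
            return (void *)(uintptr_t)(pad[0] + ((uint64_t)pad[1] << 32));
    }

    int main(void)
    {
            uint32_t pad[2];
            int x;

            stash(&x, pad);
            assert(recover(pad) == &x);
            return 0;
    }
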
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index e6e0092..20dd664 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -918,12 +918,11 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
df = 0;
}
- err = udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr,
- tos, ttl, df, sport, geneve->dst_port,
- !net_eq(geneve->net, dev_net(geneve->dev)),
- !(flags & GENEVE_F_UDP_CSUM));
+ udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr,
+ tos, ttl, df, sport, geneve->dst_port,
+ !net_eq(geneve->net, dev_net(geneve->dev)),
+ !(flags & GENEVE_F_UDP_CSUM));
- iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
return NETDEV_TX_OK;
tx_error:
@@ -1005,10 +1004,10 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
ttl = 1;
ttl = ttl ? : ip6_dst_hoplimit(dst);
}
- err = udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
- &fl6.saddr, &fl6.daddr, prio, ttl,
- sport, geneve->dst_port,
- !!(flags & GENEVE_F_UDP_ZERO_CSUM6_TX));
+ udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
+ &fl6.saddr, &fl6.daddr, prio, ttl,
+ sport, geneve->dst_port,
+ !!(flags & GENEVE_F_UDP_ZERO_CSUM6_TX));
return NETDEV_TX_OK;
tx_error:
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 0240552..50b5eac 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -137,6 +137,22 @@ MODULE_DESCRIPTION("Marvell PHY driver");
MODULE_AUTHOR("Andy Fleming");
MODULE_LICENSE("GPL");
+struct marvell_hw_stat {
+ const char *string;
+ u8 page;
+ u8 reg;
+ u8 bits;
+};
+
+static struct marvell_hw_stat marvell_hw_stats[] = {
+ { "phy_receive_errors", 0, 21, 16},
+ { "phy_idle_errors", 0, 10, 8 },
+};
+
+struct marvell_priv {
+ u64 stats[ARRAY_SIZE(marvell_hw_stats)];
+};
+
static int marvell_ack_interrupt(struct phy_device *phydev)
{
int err;
@@ -986,12 +1002,80 @@ static int m88e1318_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *w
return 0;
}
+static int marvell_get_sset_count(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(marvell_hw_stats);
+}
+
+static void marvell_get_strings(struct phy_device *phydev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ marvell_hw_stats[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+#ifndef UINT64_MAX
+#define UINT64_MAX (u64)(~((u64)0))
+#endif
+static u64 marvell_get_stat(struct phy_device *phydev, int i)
+{
+ struct marvell_hw_stat stat = marvell_hw_stats[i];
+ struct marvell_priv *priv = phydev->priv;
+ int err, oldpage, val;
+ u64 ret;
+
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
+ stat.page);
+ if (err < 0)
+ return UINT64_MAX;
+
+ val = phy_read(phydev, stat.reg);
+ if (val < 0) {
+ ret = UINT64_MAX;
+ } else {
+ val = val & ((1 << stat.bits) - 1);
+ priv->stats[i] += val;
+ ret = priv->stats[i];
+ }
+
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+
+ return ret;
+}
+
+static void marvell_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++)
+ data[i] = marvell_get_stat(phydev, i);
+}
+
+static int marvell_probe(struct phy_device *phydev)
+{
+ struct marvell_priv *priv;
+
+ priv = devm_kzalloc(&phydev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
static struct phy_driver marvell_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88E1101,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1101",
.features = PHY_GBIT_FEATURES,
+ .probe = marvell_probe,
.flags = PHY_HAS_INTERRUPT,
.config_aneg = &marvell_config_aneg,
.read_status = &genphy_read_status,
@@ -999,6 +1083,9 @@ static struct phy_driver marvell_drivers[] = {
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1007,6 +1094,7 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1112",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
+ .probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
.read_status = &genphy_read_status,
@@ -1014,6 +1102,9 @@ static struct phy_driver marvell_drivers[] = {
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1022,6 +1113,7 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1111",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
+ .probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
.read_status = &marvell_read_status,
@@ -1029,6 +1121,9 @@ static struct phy_driver marvell_drivers[] = {
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1037,6 +1132,7 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1118",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
+ .probe = marvell_probe,
.config_init = &m88e1118_config_init,
.config_aneg = &m88e1118_config_aneg,
.read_status = &genphy_read_status,
@@ -1044,6 +1140,9 @@ static struct phy_driver marvell_drivers[] = {
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
.driver = {.owner = THIS_MODULE,},
},
{
@@ -1052,6 +1151,7 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1121R",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
+ .probe = marvell_probe,
.config_aneg = &m88e1121_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
@@ -1059,6 +1159,9 @@ static struct phy_driver marvell_drivers[] = {
.did_interrupt = &m88e1121_did_interrupt,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1067,6 +1170,7 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1318S",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
+ .probe = marvell_probe,
.config_aneg = &m88e1318_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
@@ -1076,6 +1180,9 @@ static struct phy_driver marvell_drivers[] = {
.set_wol = &m88e1318_set_wol,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1084,6 +1191,7 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1145",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
+ .probe = marvell_probe,
.config_init = &m88e1145_config_init,
.config_aneg = &marvell_config_aneg,
.read_status = &genphy_read_status,
@@ -1091,6 +1199,9 @@ static struct phy_driver marvell_drivers[] = {
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1099,6 +1210,7 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1149R",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
+ .probe = marvell_probe,
.config_init = &m88e1149_config_init,
.config_aneg = &m88e1118_config_aneg,
.read_status = &genphy_read_status,
@@ -1106,6 +1218,9 @@ static struct phy_driver marvell_drivers[] = {
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1114,6 +1229,7 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1240",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
+ .probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
.read_status = &genphy_read_status,
@@ -1121,6 +1237,9 @@ static struct phy_driver marvell_drivers[] = {
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1129,6 +1248,7 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1116R",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
+ .probe = marvell_probe,
.config_init = &m88e1116r_config_init,
.config_aneg = &genphy_config_aneg,
.read_status = &genphy_read_status,
@@ -1136,6 +1256,9 @@ static struct phy_driver marvell_drivers[] = {
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1144,6 +1267,7 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1510",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
+ .probe = marvell_probe,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
@@ -1151,6 +1275,9 @@ static struct phy_driver marvell_drivers[] = {
.did_interrupt = &m88e1121_did_interrupt,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1159,6 +1286,7 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1540",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
+ .probe = marvell_probe,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
@@ -1166,6 +1294,9 @@ static struct phy_driver marvell_drivers[] = {
.did_interrupt = &m88e1121_did_interrupt,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1174,6 +1305,7 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E3016",
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
+ .probe = marvell_probe,
.config_aneg = &genphy_config_aneg,
.config_init = &m88e3016_config_init,
.aneg_done = &marvell_aneg_done,
@@ -1183,6 +1315,9 @@ static struct phy_driver marvell_drivers[] = {
.did_interrupt = &m88e1121_did_interrupt,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
.driver = { .owner = THIS_MODULE },
},
};
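
The Marvell statistics helpers above, like the Micrel ones below, share one scheme: the hardware error counters are only 8 or 16 bits wide, so each register read is masked down to stat.bits and accumulated into a per-statistic u64 in the driver's private data, with UINT64_MAX reported when the register read itself fails. A standalone sketch of the masked-accumulate step, assuming a 16-bit counter and taking the register read result as a plain argument:

    /* Sketch of the masked-accumulate pattern used by the PHY stats
     * helpers; @raw stands in for a phy_read() result.
     */
    #include <stdint.h>

    #define STAT_BITS 16                    /* width of the hardware counter */

    static uint64_t total;                  /* software accumulator */

    static uint64_t update_stat(int raw)
    {
            if (raw < 0)                    /* the read itself failed */
                    return UINT64_MAX;
            total += (uint32_t)raw & ((1u << STAT_BITS) - 1);
            return total;
    }

Note that the read result must be held in a signed int for the error check to work; a u64 can never be negative, which is exactly the bug fixed in the two get_stat helpers in this section.
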
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index e13ad6c..1a6048a 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -73,6 +73,17 @@
#define PS_TO_REG 200
+struct kszphy_hw_stat {
+ const char *string;
+ u8 reg;
+ u8 bits;
+};
+
+static struct kszphy_hw_stat kszphy_hw_stats[] = {
+ { "phy_receive_errors", 21, 16},
+ { "phy_idle_errors", 10, 8 },
+};
+
struct kszphy_type {
u32 led_mode_reg;
u16 interrupt_level_mask;
@@ -86,6 +97,7 @@ struct kszphy_priv {
int led_mode;
bool rmii_ref_clk_sel;
bool rmii_ref_clk_sel_val;
+ u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
};
static const struct kszphy_type ksz8021_type = {
@@ -569,6 +581,51 @@ ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
{
}
+static int kszphy_get_sset_count(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(kszphy_hw_stats);
+}
+
+static void kszphy_get_strings(struct phy_device *phydev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ kszphy_hw_stats[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+#ifndef UINT64_MAX
+#define UINT64_MAX (u64)(~((u64)0))
+#endif
+static u64 kszphy_get_stat(struct phy_device *phydev, int i)
+{
+ struct kszphy_hw_stat stat = kszphy_hw_stats[i];
+ struct kszphy_priv *priv = phydev->priv;
+ int val;
+ u64 ret;
+
+ val = phy_read(phydev, stat.reg);
+ if (val < 0) {
+ ret = UINT64_MAX;
+ } else {
+ val = val & ((1 << stat.bits) - 1);
+ priv->stats[i] += val;
+ ret = priv->stats[i];
+ }
+
+ return ret;
+}
+
+static void kszphy_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++)
+ data[i] = kszphy_get_stat(phydev, i);
+}
+
static int kszphy_probe(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
@@ -642,6 +699,9 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
@@ -659,6 +719,9 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
@@ -676,6 +739,9 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
@@ -693,6 +759,9 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
@@ -710,6 +779,9 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
@@ -727,6 +799,9 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
@@ -743,6 +818,9 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
@@ -759,6 +837,9 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
@@ -773,6 +854,9 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
@@ -788,6 +872,9 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_mmd_indirect = ksz9021_rd_mmd_phyreg,
@@ -805,6 +892,9 @@ static struct phy_driver ksphy_driver[] = {
.read_status = ksz9031_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, },
@@ -817,6 +907,9 @@ static struct phy_driver ksphy_driver[] = {
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, },
@@ -829,6 +922,9 @@ static struct phy_driver ksphy_driver[] = {
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, },
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index bd9acff..0c5c22b 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -118,7 +118,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
return 0;
}
if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
- netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
+ netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
size);
return 0;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ba363ce..fecf7b6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1841,9 +1841,10 @@ static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *sk
skb_set_inner_protocol(skb, htons(ETH_P_TEB));
- return udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos,
- ttl, df, src_port, dst_port, xnet,
- !(vxflags & VXLAN_F_UDP_CSUM));
+ udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos, ttl, df,
+ src_port, dst_port, xnet,
+ !(vxflags & VXLAN_F_UDP_CSUM));
+ return 0;
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -2056,8 +2057,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
skb = NULL;
goto rt_tx_error;
}
-
- iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct dst_entry *ndst;