Diffstat (limited to 'drivers/net/ethernet/qlogic')
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig                  |   31
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c |    2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/Makefile             |    4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h                |   84
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c            |  186
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.h            |   13
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c           |  562
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.h           |   80
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c            |  740
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev_api.h        |   33
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h            |  147
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.c             |   67
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.h             |   10
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c  |  167
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_ops.c       |    4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c            |  160
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.h            |   36
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c             |  717
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.h             |  239
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c           |  282
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c            |  356
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h            |   80
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h       |   53
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_selftest.c       |   76
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_selftest.h       |   40
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp.h             |   36
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c    |  322
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c            |   19
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c          | 3613
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.h          |  386
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.c             | 1102
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.h             |  990
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h              |   45
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c      |  625
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c         |  501
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c     |   10
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c         |    2
37 files changed, 11127 insertions(+), 693 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index ddcfcab..680d8c7 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -98,9 +98,40 @@ config QED
---help---
This enables the support for ...
+config QED_SRIOV
+ bool "QLogic QED 25/40/100Gb SR-IOV support"
+ depends on QED && PCI_IOV
+ default y
+ ---help---
+ This configuration parameter enables Single Root Input Output
+ Virtualization support for QED devices.
+ This allows for virtual function acceleration in virtualized
+ environments.
+
config QEDE
tristate "QLogic QED 25/40/100Gb Ethernet NIC"
depends on QED
---help---
This enables the support for ...
+
+config QEDE_VXLAN
+ bool "Virtual eXtensible Local Area Network support"
+ default n
+ depends on QEDE && VXLAN && !(QEDE=y && VXLAN=m)
+ ---help---
+ This enables hardware offload support for VXLAN protocol over
+ qede module. Say Y here if you want to enable hardware offload
+ support for Virtual eXtensible Local Area Network (VXLAN)
+ in the driver.
+
+config QEDE_GENEVE
+ bool "Generic Network Virtualization Encapsulation (GENEVE) support"
+ depends on QEDE && GENEVE && !(QEDE=y && GENEVE=m)
+ ---help---
+ This allows one to create GENEVE virtual interfaces that provide
+ Layer 2 Networks over Layer 3 Networks. GENEVE is often used
+ to tunnel virtual network infrastructure in virtualized environments.
+ Say Y here if you want to enable hardware offload support for
+ Generic Network Virtualization Encapsulation (GENEVE) in the driver.
+
endif # NET_VENDOR_QLOGIC
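
As a side note on how a bool symbol such as QEDE_VXLAN or QEDE_GENEVE is typically consumed, here is a minimal, hedged sketch; the helper name is illustrative and is not part of this patch:

	/* Illustrative only: a bool Kconfig symbol is usually consumed via
	 * #ifdef or IS_ENABLED(); this helper is a sketch, not qede code.
	 */
	#include <linux/kconfig.h>

	static bool qede_vxlan_offload_compiled_in(void)
	{
		return IS_ENABLED(CONFIG_QEDE_VXLAN);
	}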
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 9c6eed9..7a0281a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2286,7 +2286,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
goto request_reset;
}
}
- adapter->netdev->trans_start = jiffies;
+ netif_trans_update(adapter->netdev);
rtnl_unlock();
return;
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 5c2fd57..d1f157e 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -1,4 +1,6 @@
obj-$(CONFIG_QED) := qed.o
qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
- qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o
+ qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
+ qed_selftest.o qed_dcbx.o
+qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index fcb8e9b..1042f2a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -26,12 +26,14 @@
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.7.0.0"
+#define DRV_MODULE_VERSION "8.7.1.20"
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16
#define VER_SIZE 16
+#define QED_WFQ_UNIT 100
+
/* cau states */
enum qed_coalescing_mode {
QED_COAL_MODE_DISABLE,
@@ -74,6 +76,51 @@ struct qed_rt_data {
bool *b_valid;
};
+enum qed_tunn_mode {
+ QED_MODE_L2GENEVE_TUNN,
+ QED_MODE_IPGENEVE_TUNN,
+ QED_MODE_L2GRE_TUNN,
+ QED_MODE_IPGRE_TUNN,
+ QED_MODE_VXLAN_TUNN,
+};
+
+enum qed_tunn_clss {
+ QED_TUNN_CLSS_MAC_VLAN,
+ QED_TUNN_CLSS_MAC_VNI,
+ QED_TUNN_CLSS_INNER_MAC_VLAN,
+ QED_TUNN_CLSS_INNER_MAC_VNI,
+ MAX_QED_TUNN_CLSS,
+};
+
+struct qed_tunn_start_params {
+ unsigned long tunn_mode;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 update_vxlan_udp_port;
+ u8 update_geneve_udp_port;
+ u8 tunn_clss_vxlan;
+ u8 tunn_clss_l2geneve;
+ u8 tunn_clss_ipgeneve;
+ u8 tunn_clss_l2gre;
+ u8 tunn_clss_ipgre;
+};
+
+struct qed_tunn_update_params {
+ unsigned long tunn_mode_update_mask;
+ unsigned long tunn_mode;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 update_rx_pf_clss;
+ u8 update_tx_pf_clss;
+ u8 update_vxlan_udp_port;
+ u8 update_geneve_udp_port;
+ u8 tunn_clss_vxlan;
+ u8 tunn_clss_l2geneve;
+ u8 tunn_clss_ipgeneve;
+ u8 tunn_clss_l2gre;
+ u8 tunn_clss_ipgre;
+};
+
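+/* To make the intended use of these structures concrete, a hedged sketch of
+ * filling the start params; the field semantics are inferred from the layout
+ * above and the helper below is hypothetical, assuming the usual kernel
+ * headers (linux/bitops.h, linux/string.h):
+ *
+ *	static void example_fill_tunn_start(struct qed_tunn_start_params *p)
+ *	{
+ *		memset(p, 0, sizeof(*p));
+ *		p->tunn_mode = BIT(QED_MODE_VXLAN_TUNN) |
+ *			       BIT(QED_MODE_L2GRE_TUNN);
+ *		p->vxlan_udp_port = 4789;	// IANA VXLAN port
+ *		p->update_vxlan_udp_port = 1;
+ *		p->tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
+ *	}
+ */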
/* The PCI personality is not quite synonymous to protocol ID:
* 1. All personalities need CORE connections
* 2. The Ethernet personality may support also the RoCE protocol
@@ -105,6 +152,7 @@ enum QED_RESOURCES {
enum QED_FEATURE {
QED_PF_L2_QUE,
+ QED_VF,
QED_MAX_FEATURES,
};
@@ -192,6 +240,12 @@ struct qed_dmae_info {
struct dmae_cmd *p_dmae_cmd;
};
+struct qed_wfq_data {
+ /* when feature is configured for at least 1 vport */
+ u32 min_speed;
+ bool configured;
+};
+
struct qed_qm_info {
struct init_qm_pq_params *qm_pq_params;
struct init_qm_vport_params *qm_vport_params;
@@ -212,6 +266,7 @@ struct qed_qm_info {
bool vport_wfq_en;
u8 pf_wfq;
u32 pf_rl;
+ struct qed_wfq_data *wfq_data;
};
struct storm_stats {
@@ -256,6 +311,8 @@ struct qed_hwfn {
bool first_on_engine;
bool hw_init_done;
+ u8 num_funcs_on_engine;
+
/* BAR access */
void __iomem *regview;
void __iomem *doorbells;
@@ -306,8 +363,12 @@ struct qed_hwfn {
/* True if the driver requests for the link */
bool b_drv_link_init;
+ struct qed_vf_iov *vf_iov_info;
+ struct qed_pf_iov *pf_iov_info;
struct qed_mcp_info *mcp_info;
+ struct qed_dcbx_info *p_dcbx_info;
+
struct qed_hw_cid_data *p_tx_cids;
struct qed_hw_cid_data *p_rx_cids;
@@ -322,6 +383,12 @@ struct qed_hwfn {
struct qed_simd_fp_handler simd_proto_handler[64];
+#ifdef CONFIG_QED_SRIOV
+ struct workqueue_struct *iov_wq;
+ struct delayed_work iov_task;
+ unsigned long iov_task_flags;
+#endif
+
struct z_stream_s *stream;
};
@@ -430,6 +497,13 @@ struct qed_dev {
u8 num_hwfns;
struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+ /* SRIOV */
+ struct qed_hw_sriov_info *p_iov_info;
+#define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info)
+
+ unsigned long tunn_mode;
+
+ bool b_is_vf;
u32 drv_type;
struct qed_eth_stats *reset_stats;
@@ -459,6 +533,8 @@ struct qed_dev {
const struct firmware *firmware;
};
+#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB
+#define NUM_OF_L2_QUEUES(dev) MAX_NUM_L2_QUEUES_BB
#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB
#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB
@@ -480,6 +556,10 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
#define PURE_LB_TC 8
+int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
+
+void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
/* Other Linux specific common definitions */
@@ -507,6 +587,4 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
-#define QED_ETH_INTERFACE_VERSION 300
-
#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index fc767c0..ac284c5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -24,11 +24,13 @@
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
+#include "qed_sriov.h"
/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES PROTOCOLID_COMMON
#define NUM_TASK_TYPES 2
#define NUM_TASK_PF_SEGMENTS 4
+#define NUM_TASK_VF_SEGMENTS 1
/* QM constants */
#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
@@ -63,10 +65,12 @@ union conn_context {
struct qed_conn_type_cfg {
u32 cid_count;
u32 cid_start;
+ u32 cids_per_vf;
};
/* ILT Client configuration, Per connection type (protocol) resources. */
#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
+#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK (0)
enum ilt_clients {
@@ -97,6 +101,10 @@ struct qed_ilt_client_cfg {
/* ILT client blocks for PF */
struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
u32 pf_total_lines;
+
+ /* ILT client blocks for VFs */
+ struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
+ u32 vf_total_lines;
};
/* Per Path -
@@ -123,6 +131,11 @@ struct qed_cxt_mngr {
/* computed ILT structure */
struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
+ /* total number of VFs for this hwfn -
+ * ALL VFs are symmetric in terms of HW resources
+ */
+ u32 vf_count;
+
/* Acquired CIDs */
struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
@@ -131,37 +144,60 @@ struct qed_cxt_mngr {
u32 pf_start_line;
};
-static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr)
-{
- u32 type, pf_cids = 0;
+/* counts the iids for the CDU/CDUC ILT client configuration */
+struct qed_cdu_iids {
+ u32 pf_cids;
+ u32 per_vf_cids;
+};
- for (type = 0; type < MAX_CONN_TYPES; type++)
- pf_cids += p_mngr->conn_cfg[type].cid_count;
+static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
+ struct qed_cdu_iids *iids)
+{
+ u32 type;
- return pf_cids;
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
+ iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+ }
}
static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
struct qed_qm_iids *iids)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- int type;
+ u32 vf_cids = 0, type;
- for (type = 0; type < MAX_CONN_TYPES; type++)
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
iids->cids += p_mngr->conn_cfg[type].cid_count;
+ vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+ }
- DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids);
+ iids->vf_cids += vf_cids * p_mngr->vf_count;
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "iids: CIDS %08x vf_cids %08x\n",
+ iids->cids, iids->vf_cids);
}
/* set the iids count per protocol */
static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type,
- u32 cid_count)
+ u32 cid_count, u32 vf_cid_cnt)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
+ p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);
+}
+
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *vf_cid)
+{
+ if (vf_cid)
+ *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
+
+ return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
}
static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
@@ -210,10 +246,12 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_ilt_client_cfg *p_cli;
struct qed_ilt_cli_blk *p_blk;
- u32 curr_line, total, pf_cids;
+ struct qed_cdu_iids cdu_iids;
struct qed_qm_iids qm_iids;
+ u32 curr_line, total, i;
memset(&qm_iids, 0, sizeof(qm_iids));
+ memset(&cdu_iids, 0, sizeof(cdu_iids));
p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
@@ -224,14 +262,16 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
/* CDUC */
p_cli = &p_mngr->clients[ILT_CLI_CDUC];
curr_line = p_mngr->pf_start_line;
+
+ /* CDUC PF */
p_cli->pf_total_lines = 0;
/* get the counters for the CDUC and QM clients */
- pf_cids = qed_cxt_cdu_iids(p_mngr);
+ qed_cxt_cdu_iids(p_mngr, &cdu_iids);
p_blk = &p_cli->pf_blks[CDUC_BLK];
- total = pf_cids * CONN_CXT_SIZE(p_hwfn);
+ total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total, CONN_CXT_SIZE(p_hwfn));
@@ -239,17 +279,36 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ /* CDUC VF */
+ p_blk = &p_cli->vf_blks[CDUC_BLK];
+ total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total, CONN_CXT_SIZE(p_hwfn));
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+ p_cli->vf_total_lines = curr_line - p_blk->start_line;
+
+ for (i = 1; i < p_mngr->vf_count; i++)
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUC);
+
/* QM */
p_cli = &p_mngr->clients[ILT_CLI_QM];
p_blk = &p_cli->pf_blks[0];
qed_cxt_qm_iids(p_hwfn, &qm_iids);
- total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0,
- p_hwfn->qm_info.num_pqs, 0);
-
- DP_VERBOSE(p_hwfn, QED_MSG_ILT,
- "QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n",
- qm_iids.cids, p_hwfn->qm_info.num_pqs, total);
+ total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+ qm_iids.vf_cids, 0,
+ p_hwfn->qm_info.num_pqs,
+ p_hwfn->qm_info.num_vf_pqs);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_ILT,
+ "QM ILT Info, (cids=%d, vf_cids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
+ qm_iids.cids,
+ qm_iids.vf_cids,
+ p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
qed_ilt_cli_blk_fill(p_cli, p_blk,
curr_line, total * 0x1000,
@@ -358,7 +417,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_ilt_client_cfg *clients = p_mngr->clients;
struct qed_ilt_cli_blk *p_blk;
- u32 size, i, j;
+ u32 size, i, j, k;
int rc;
size = qed_cxt_ilt_shadow_size(clients);
@@ -383,6 +442,16 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
if (rc != 0)
goto ilt_shadow_fail;
}
+ for (k = 0; k < p_mngr->vf_count; k++) {
+ for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
+ u32 lines = clients[i].vf_total_lines * k;
+
+ p_blk = &clients[i].vf_blks[j];
+ rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
+ if (rc != 0)
+ goto ilt_shadow_fail;
+ }
+ }
}
return 0;
@@ -467,6 +536,9 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
for (i = 0; i < ILT_CLI_MAX; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+ if (p_hwfn->cdev->p_iov_info)
+ p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
+
/* Set the cxt mngr pointer prior to further allocations */
p_hwfn->p_cxt_mngr = p_mngr;
@@ -579,8 +651,10 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
params.is_first_pf = p_hwfn->first_on_engine;
params.num_pf_cids = iids.cids;
+ params.num_vf_cids = iids.vf_cids;
params.start_pq = qm_info->start_pq;
- params.num_pf_pqs = qm_info->num_pqs;
+ params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
+ params.num_vf_pqs = qm_info->num_vf_pqs;
params.start_vport = qm_info->start_vport;
params.num_vports = qm_info->num_vports;
params.pf_wfq = qm_info->pf_wfq;
@@ -610,26 +684,55 @@ static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 dq_pf_max_cid = 0;
+ u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
+ dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
+ dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
+ dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
+ dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
- /* 5 - PF */
+ dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
+
+ /* Connection types 6 & 7 are not in use, yet they must be configured
+ * as the highest possible connection. Not configuring them means the
+ * defaults will be used, and with a large number of cids a bug may
+ * occur if the defaults end up smaller than dq_pf_max_cid /
+ * dq_vf_max_cid.
+ */
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
+
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
}
static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
@@ -653,6 +756,38 @@ static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
}
}
+static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ u32 blk_factor;
+
+ /* For simplicity we set the 'block' to be an ILT page */
+ if (p_hwfn->cdev->p_iov_info) {
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_VF_BASE_RT_OFFSET,
+ p_iov->first_vf_in_pf);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
+ p_iov->first_vf_in_pf + p_iov->total_vfs);
+ }
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
+ blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+}
+
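+/* A small worked example for the block-factor arithmetic above, assuming
+ * (purely for illustration) a 4 KB ILT page:
+ *   blk_factor = ilog2(4096 >> 10) = ilog2(4) = 2
+ * i.e. PSWRQ2 counts the PF/VF CDUC allocations in 4 KB blocks.
+ */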
/* ILT (PSWRQ2) PF */
static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
{
@@ -662,6 +797,7 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
u32 line, rt_offst, i;
qed_ilt_bounds_init(p_hwfn);
+ qed_ilt_vf_bounds_init(p_hwfn);
p_mngr = p_hwfn->p_cxt_mngr;
p_shdw = p_mngr->ilt_shadow;
@@ -839,10 +975,10 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
/* Set the number of required CORE connections */
u32 core_cids = 1; /* SPQ */
- qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids);
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
- p_params->num_cons);
+ p_params->num_cons, 1);
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index c8e1f5e..234c0fa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -51,6 +51,9 @@ enum qed_cxt_elem_type {
QED_ELEM_TASK
};
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *vf_cid);
+
/**
* @brief qed_cxt_set_pf_params - Set the PF params for cxt init
*
@@ -128,6 +131,16 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn);
void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
/**
+ * @brief Reconfigures QM pf on the fly
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
* @brief qed_cxt_release - Release a cid
*
* @param p_hwfn
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
new file mode 100644
index 0000000..cbf58e1
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -0,0 +1,562 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dcbx.h"
+#include "qed_hsi.h"
+#include "qed_sp.h"
+
+#define QED_DCBX_MAX_MIB_READ_TRY (100)
+#define QED_ETH_TYPE_DEFAULT (0)
+#define QED_ETH_TYPE_ROCE (0x8915)
+#define QED_UDP_PORT_TYPE_ROCE_V2 (0x12B7)
+#define QED_ETH_TYPE_FCOE (0x8906)
+#define QED_TCP_PORT_ISCSI (0xCBC)
+
+#define QED_DCBX_INVALID_PRIORITY 0xFF
+
+/* Get Traffic Class from priority traffic class table, 4 bits represent
+ * the traffic class corresponding to the priority.
+ */
+#define QED_DCBX_PRIO2TC(prio_tc_tbl, prio) \
+ ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)
+
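+/* A short worked example for QED_DCBX_PRIO2TC; the table value is made up
+ * for illustration. The table packs one nibble per priority, priority 0
+ * occupying the most significant nibble. With prio_tc_tbl = 0x01234567:
+ *   prio 0 -> (0x01234567 >> 28) & 0x7 = 0 -> TC 0
+ *   prio 3 -> (0x01234567 >> 16) & 0x7 = 3 -> TC 3
+ *   prio 7 -> (0x01234567 >>  0) & 0x7 = 7 -> TC 7
+ */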
+static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = {
+ {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_DEFAULT},
+ {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_DEFAULT},
+ {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_DEFAULT},
+ {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_DEFAULT},
+ {DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH}
+};
+
+static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
+{
+ return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_ETHTYPE);
+}
+
+static bool qed_dcbx_app_port(u32 app_info_bitmap)
+{
+ return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_PORT);
+}
+
+static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+ return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
+ proto_id == QED_ETH_TYPE_DEFAULT);
+}
+
+static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+ return !!(qed_dcbx_app_port(app_info_bitmap) &&
+ proto_id == QED_TCP_PORT_ISCSI);
+}
+
+static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+ return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
+ proto_id == QED_ETH_TYPE_FCOE);
+}
+
+static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+ return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
+ proto_id == QED_ETH_TYPE_ROCE);
+}
+
+static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+ return !!(qed_dcbx_app_port(app_info_bitmap) &&
+ proto_id == QED_UDP_PORT_TYPE_ROCE_V2);
+}
+
+static void
+qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
+{
+ enum dcbx_protocol_type id;
+ int i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "DCBX negotiated: %d\n",
+ p_data->dcbx_enabled);
+
+ for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
+ id = qed_dcbx_app_update[i].id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n",
+ qed_dcbx_app_update[i].name, p_data->arr[id].update,
+ p_data->arr[id].enable, p_data->arr[id].priority,
+ p_data->arr[id].tc, p_hwfn->hw_info.num_tc);
+ }
+}
+
+static void
+qed_dcbx_set_params(struct qed_dcbx_results *p_data,
+ struct qed_hw_info *p_info,
+ bool enable,
+ bool update,
+ u8 prio,
+ u8 tc,
+ enum dcbx_protocol_type type,
+ enum qed_pci_personality personality)
+{
+ /* PF update ramrod data */
+ p_data->arr[type].update = update;
+ p_data->arr[type].enable = enable;
+ p_data->arr[type].priority = prio;
+ p_data->arr[type].tc = tc;
+
+ /* QM reconf data */
+ if (p_info->personality == personality) {
+ if (personality == QED_PCI_ETH)
+ p_info->non_offload_tc = tc;
+ else
+ p_info->offload_tc = tc;
+ }
+}
+
+/* Update app protocol data and hw_info fields with the TLV info */
+static void
+qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
+ struct qed_hwfn *p_hwfn,
+ bool enable,
+ bool update,
+ u8 prio, u8 tc, enum dcbx_protocol_type type)
+{
+ struct qed_hw_info *p_info = &p_hwfn->hw_info;
+ enum qed_pci_personality personality;
+ enum dcbx_protocol_type id;
+ char *name;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
+ id = qed_dcbx_app_update[i].id;
+
+ if (type != id)
+ continue;
+
+ personality = qed_dcbx_app_update[i].personality;
+ name = qed_dcbx_app_update[i].name;
+
+ qed_dcbx_set_params(p_data, p_info, enable, update,
+ prio, tc, type, personality);
+ }
+}
+
+static bool
+qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
+ u32 app_prio_bitmap,
+ u16 id, enum dcbx_protocol_type *type)
+{
+ if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) {
+ *type = DCBX_PROTOCOL_FCOE;
+ } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) {
+ *type = DCBX_PROTOCOL_ROCE;
+ } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) {
+ *type = DCBX_PROTOCOL_ISCSI;
+ } else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) {
+ *type = DCBX_PROTOCOL_ETH;
+ } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) {
+ *type = DCBX_PROTOCOL_ROCE_V2;
+ } else {
+ *type = DCBX_MAX_PROTOCOL_TYPE;
+ DP_ERR(p_hwfn,
+ "No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n",
+ id, app_prio_bitmap);
+ return false;
+ }
+
+ return true;
+}
+
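+/* Putting the matchers together, the classification above resolves as
+ * follows (derived from the constants defined earlier in this file):
+ *   SF = ethtype, id = 0x8906 (FCoE)        -> DCBX_PROTOCOL_FCOE
+ *   SF = ethtype, id = 0x8915 (RoCE)        -> DCBX_PROTOCOL_ROCE
+ *   SF = port,    id = 0xCBC  (iSCSI TCP)   -> DCBX_PROTOCOL_ISCSI
+ *   SF = port,    id = 0x12B7 (RoCE v2 UDP) -> DCBX_PROTOCOL_ROCE_V2
+ *   SF = ethtype, id = 0      (default)     -> DCBX_PROTOCOL_ETH
+ * anything else returns false and the TLV is ignored.
+ */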
+/* Parse app TLV's to update TC information in hw_info structure for
+ * reconfiguring QM. Get protocol specific data for PF update ramrod command.
+ */
+static int
+qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_results *p_data,
+ struct dcbx_app_priority_entry *p_tbl,
+ u32 pri_tc_tbl, int count, bool dcbx_enabled)
+{
+ u8 tc, priority, priority_map;
+ enum dcbx_protocol_type type;
+ u16 protocol_id;
+ bool enable;
+ int i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
+
+ /* Parse APP TLV */
+ for (i = 0; i < count; i++) {
+ protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
+ priority_map = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PRI_MAP);
+ if (!priority_map) {
+ DP_ERR(p_hwfn, "Priority map is zero\n");
+ return -EINVAL;
+ }
+ priority = ffs(priority_map) - 1;
+
+ tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority);
+ if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+ protocol_id, &type)) {
+ /* ETH always has the enable bit reset, as it gets
+ * vlan information per packet. For other protocols,
+ * it should be set according to the dcbx_enabled
+ * indication, but we only get here if there was an
+ * app tlv for the protocol, so dcbx must be enabled.
+ */
+ enable = !!(type == DCBX_PROTOCOL_ETH);
+
+ qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ priority, tc, type);
+ }
+ }
+
+ /* If the RoCE-V2 TLV is not detected, the driver needs to use the
+ * RoCE app data for RoCE-v2, not the default app data.
+ */
+ if (!p_data->arr[DCBX_PROTOCOL_ROCE_V2].update &&
+ p_data->arr[DCBX_PROTOCOL_ROCE].update) {
+ tc = p_data->arr[DCBX_PROTOCOL_ROCE].tc;
+ priority = p_data->arr[DCBX_PROTOCOL_ROCE].priority;
+ qed_dcbx_update_app_info(p_data, p_hwfn, true, true,
+ priority, tc, DCBX_PROTOCOL_ROCE_V2);
+ }
+
+ /* Update ramrod protocol data and hw_info fields
+ * with default info when corresponding APP TLV's are not detected.
+ * The enabled field has different logic for ethernet, as only for
+ * ethernet should dcb be disabled by default, since the information
+ * arrives from the OS (unless an explicit app tlv was present).
+ */
+ tc = p_data->arr[DCBX_PROTOCOL_ETH].tc;
+ priority = p_data->arr[DCBX_PROTOCOL_ETH].priority;
+ for (type = 0; type < DCBX_MAX_PROTOCOL_TYPE; type++) {
+ if (p_data->arr[type].update)
+ continue;
+
+ enable = (type == DCBX_PROTOCOL_ETH) ? false : dcbx_enabled;
+ qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ priority, tc, type);
+ }
+
+ return 0;
+}
+
+/* Parse app TLV's to update TC information in hw_info structure for
+ * reconfiguring QM. Get protocol specific data for PF update ramrod command.
+ */
+static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
+{
+ struct dcbx_app_priority_feature *p_app;
+ struct dcbx_app_priority_entry *p_tbl;
+ struct qed_dcbx_results data = { 0 };
+ struct dcbx_ets_feature *p_ets;
+ struct qed_hw_info *p_info;
+ u32 pri_tc_tbl, flags;
+ bool dcbx_enabled;
+ int num_entries;
+ int rc = 0;
+
+ /* If the DCBx version is non-zero, then negotiation was
+ * successfully performed
+ */
+ flags = p_hwfn->p_dcbx_info->operational.flags;
+ dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
+
+ p_app = &p_hwfn->p_dcbx_info->operational.features.app;
+ p_tbl = p_app->app_pri_tbl;
+
+ p_ets = &p_hwfn->p_dcbx_info->operational.features.ets;
+ pri_tc_tbl = p_ets->pri_tc_tbl[0];
+
+ p_info = &p_hwfn->hw_info;
+ num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
+
+ rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
+ num_entries, dcbx_enabled);
+ if (rc)
+ return rc;
+
+ p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
+ data.pf_id = p_hwfn->rel_pf_id;
+ data.dcbx_enabled = dcbx_enabled;
+
+ qed_dcbx_dp_protocol(p_hwfn, &data);
+
+ memcpy(&p_hwfn->p_dcbx_info->results, &data,
+ sizeof(struct qed_dcbx_results));
+
+ return 0;
+}
+
+static int
+qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_dcbx_mib_meta_data *p_data,
+ enum qed_mib_read_type type)
+{
+ u32 prefix_seq_num, suffix_seq_num;
+ int read_count = 0;
+ int rc = 0;
+
+ /* The data is considered to be valid only if both sequence numbers are
+ * the same.
+ */
+ do {
+ if (type == QED_DCBX_REMOTE_LLDP_MIB) {
+ qed_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote,
+ p_data->addr, p_data->size);
+ prefix_seq_num = p_data->lldp_remote->prefix_seq_num;
+ suffix_seq_num = p_data->lldp_remote->suffix_seq_num;
+ } else {
+ qed_memcpy_from(p_hwfn, p_ptt, p_data->mib,
+ p_data->addr, p_data->size);
+ prefix_seq_num = p_data->mib->prefix_seq_num;
+ suffix_seq_num = p_data->mib->suffix_seq_num;
+ }
+ read_count++;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DCB,
+ "mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
+ type, read_count, prefix_seq_num, suffix_seq_num);
+ } while ((prefix_seq_num != suffix_seq_num) &&
+ (read_count < QED_DCBX_MAX_MIB_READ_TRY));
+
+ if (read_count >= QED_DCBX_MAX_MIB_READ_TRY) {
+ DP_ERR(p_hwfn,
+ "MIB read err, mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
+ type, read_count, prefix_seq_num, suffix_seq_num);
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+ lldp_config_params);
+ data.lldp_local = p_hwfn->p_dcbx_info->lldp_local;
+ data.size = sizeof(struct lldp_config_params_s);
+ qed_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size);
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_remote_lldp_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_mib_read_type type)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+ lldp_status_params);
+ data.lldp_remote = p_hwfn->p_dcbx_info->lldp_remote;
+ data.size = sizeof(struct lldp_status_params_s);
+ rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_operational_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_mib_read_type type)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, operational_dcbx_mib);
+ data.mib = &p_hwfn->p_dcbx_info->operational;
+ data.size = sizeof(struct dcbx_mib);
+ rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_remote_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, remote_dcbx_mib);
+ data.mib = &p_hwfn->p_dcbx_info->remote;
+ data.size = sizeof(struct dcbx_mib);
+ rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_local_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, local_admin_dcbx_mib);
+ data.local_admin = &p_hwfn->p_dcbx_info->local_admin;
+ data.size = sizeof(struct dcbx_local_params);
+ qed_memcpy_from(p_hwfn, p_ptt, data.local_admin, data.addr, data.size);
+
+ return rc;
+}
+
+static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+ int rc = -EINVAL;
+
+ switch (type) {
+ case QED_DCBX_OPERATIONAL_MIB:
+ rc = qed_dcbx_read_operational_mib(p_hwfn, p_ptt, type);
+ break;
+ case QED_DCBX_REMOTE_MIB:
+ rc = qed_dcbx_read_remote_mib(p_hwfn, p_ptt, type);
+ break;
+ case QED_DCBX_LOCAL_MIB:
+ rc = qed_dcbx_read_local_mib(p_hwfn, p_ptt);
+ break;
+ case QED_DCBX_REMOTE_LLDP_MIB:
+ rc = qed_dcbx_read_remote_lldp_mib(p_hwfn, p_ptt, type);
+ break;
+ case QED_DCBX_LOCAL_LLDP_MIB:
+ rc = qed_dcbx_read_local_lldp_mib(p_hwfn, p_ptt);
+ break;
+ default:
+ DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+ }
+
+ return rc;
+}
+
+/* Read updated MIB.
+ * Reconfigure QM and invoke PF update ramrod command if operational MIB
+ * change is detected.
+ */
+int
+qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+ int rc = 0;
+
+ rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type);
+ if (rc)
+ return rc;
+
+ if (type == QED_DCBX_OPERATIONAL_MIB) {
+ rc = qed_dcbx_process_mib_info(p_hwfn);
+ if (!rc) {
+ /* reconfigure tcs of QM queues according
+ * to negotiation results
+ */
+ qed_qm_reconf(p_hwfn, p_ptt);
+
+ /* update storm FW with negotiation results */
+ qed_sp_pf_update(p_hwfn);
+ }
+ }
+
+ return rc;
+}
+
+int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
+{
+ int rc = 0;
+
+ p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL);
+ if (!p_hwfn->p_dcbx_info) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate 'struct qed_dcbx_info'\n");
+ rc = -ENOMEM;
+ }
+
+ return rc;
+}
+
+void qed_dcbx_info_free(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_info *p_dcbx_info)
+{
+ kfree(p_hwfn->p_dcbx_info);
+}
+
+static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
+ struct qed_dcbx_results *p_src,
+ enum dcbx_protocol_type type)
+{
+ p_data->dcb_enable_flag = p_src->arr[type].enable;
+ p_data->dcb_priority = p_src->arr[type].priority;
+ p_data->dcb_tc = p_src->arr[type].tc;
+}
+
+/* Set pf update ramrod command params */
+void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
+ struct pf_update_ramrod_data *p_dest)
+{
+ struct protocol_dcb_data *p_dcb_data;
+ bool update_flag = false;
+
+ p_dest->pf_id = p_src->pf_id;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
+ p_dest->update_fcoe_dcb_data_flag = update_flag;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_ROCE].update;
+ p_dest->update_roce_dcb_data_flag = update_flag;
+ update_flag = p_src->arr[DCBX_PROTOCOL_ROCE_V2].update;
+ p_dest->update_roce_dcb_data_flag = update_flag;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_ISCSI].update;
+ p_dest->update_iscsi_dcb_data_flag = update_flag;
+ update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
+ p_dest->update_eth_dcb_data_flag = update_flag;
+
+ p_dcb_data = &p_dest->fcoe_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE);
+ p_dcb_data = &p_dest->roce_dcb_data;
+
+ if (p_src->arr[DCBX_PROTOCOL_ROCE].update)
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src,
+ DCBX_PROTOCOL_ROCE);
+ if (p_src->arr[DCBX_PROTOCOL_ROCE_V2].update)
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src,
+ DCBX_PROTOCOL_ROCE_V2);
+
+ p_dcb_data = &p_dest->iscsi_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ISCSI);
+ p_dcb_data = &p_dest->eth_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
new file mode 100644
index 0000000..e7f834d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -0,0 +1,80 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_DCBX_H
+#define _QED_DCBX_H
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+
+#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
+
+enum qed_mib_read_type {
+ QED_DCBX_OPERATIONAL_MIB,
+ QED_DCBX_REMOTE_MIB,
+ QED_DCBX_LOCAL_MIB,
+ QED_DCBX_REMOTE_LLDP_MIB,
+ QED_DCBX_LOCAL_LLDP_MIB
+};
+
+struct qed_dcbx_app_data {
+ bool enable; /* DCB enabled */
+ bool update; /* Update indication */
+ u8 priority; /* Priority */
+ u8 tc; /* Traffic Class */
+};
+
+struct qed_dcbx_results {
+ bool dcbx_enabled;
+ u8 pf_id;
+ struct qed_dcbx_app_data arr[DCBX_MAX_PROTOCOL_TYPE];
+};
+
+struct qed_dcbx_app_metadata {
+ enum dcbx_protocol_type id;
+ char *name;
+ enum qed_pci_personality personality;
+};
+
+#define QED_MFW_GET_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _SHIFT))
+
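+/* A trivial worked example for QED_MFW_GET_FIELD; the field definition is
+ * hypothetical:
+ *   #define FOO_NUM_ENTRIES_MASK  0x000000f0
+ *   #define FOO_NUM_ENTRIES_SHIFT 4
+ *   QED_MFW_GET_FIELD(0xa5, FOO_NUM_ENTRIES) == (0xa5 & 0xf0) >> 4 == 0xa
+ */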
+struct qed_dcbx_info {
+ struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
+ struct dcbx_local_params local_admin;
+ struct qed_dcbx_results results;
+ struct dcbx_mib operational;
+ struct dcbx_mib remote;
+ u8 dcbx_cap;
+};
+
+struct qed_dcbx_mib_meta_data {
+ struct lldp_config_params_s *lldp_local;
+ struct lldp_status_params_s *lldp_remote;
+ struct dcbx_local_params *local_admin;
+ struct dcbx_mib *mib;
+ size_t size;
+ u32 addr;
+};
+
+/* QED local interface routines */
+int
+qed_dcbx_mib_update_event(struct qed_hwfn *,
+ struct qed_ptt *, enum qed_mib_read_type);
+
+int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn);
+void qed_dcbx_info_free(struct qed_hwfn *, struct qed_dcbx_info *);
+void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
+ struct pf_update_ramrod_data *p_dest);
+
+#endif
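
A rough sketch of how these entry points fit together; the caller below is hypothetical (in the driver the update event is driven from management-firmware attention handling):

	/* Hypothetical caller; real wiring lives in the MCP/attention path. */
	static int example_dcbx_flow(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
	{
		int rc;

		rc = qed_dcbx_info_alloc(p_hwfn);
		if (rc)
			return rc;

		/* on an operational MIB change notification from the MFW */
		rc = qed_dcbx_mib_update_event(p_hwfn, p_ptt,
					       QED_DCBX_OPERATIONAL_MIB);

		qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
		return rc;
	}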
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index b7d100f..089016f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -22,6 +22,7 @@
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_cxt.h"
+#include "qed_dcbx.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
@@ -30,6 +31,11 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+static spinlock_t qm_lock;
+static bool qm_lock_init = false;
/* API common to all protocols */
enum BAR_ID {
@@ -40,10 +46,14 @@ enum BAR_ID {
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
enum BAR_ID bar_id)
{
- u32 bar_reg = (bar_id == BAR_ID_0 ?
- PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
- u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+ u32 bar_reg = (bar_id == BAR_ID_0 ?
+ PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
+ u32 val;
+
+ if (IS_VF(p_hwfn->cdev))
+ return 1 << 17;
+ val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
if (val)
return 1 << (val + 15);
@@ -105,12 +115,17 @@ static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
qm_info->qm_vport_params = NULL;
kfree(qm_info->qm_port_params);
qm_info->qm_port_params = NULL;
+ kfree(qm_info->wfq_data);
+ qm_info->wfq_data = NULL;
}
void qed_resc_free(struct qed_dev *cdev)
{
int i;
+ if (IS_VF(cdev))
+ return;
+
kfree(cdev->fw_data);
cdev->fw_data = NULL;
@@ -134,20 +149,27 @@ void qed_resc_free(struct qed_dev *cdev)
qed_eq_free(p_hwfn, p_hwfn->p_eq);
qed_consq_free(p_hwfn, p_hwfn->p_consq);
qed_int_free(p_hwfn);
+ qed_iov_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
+ qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
}
}
static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
{
+ u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct init_qm_port_params *p_qm_port;
- u8 num_vports, i, vport_id, num_ports;
u16 num_pqs, multi_cos_tcs = 1;
+ u16 num_vfs = 0;
+#ifdef CONFIG_QED_SRIOV
+ if (p_hwfn->cdev->p_iov_info)
+ num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+#endif
memset(qm_info, 0, sizeof(*qm_info));
- num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
+ num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */
num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
/* Sanity checking that setup requires legal number of resources */
@@ -175,25 +197,50 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
if (!qm_info->qm_port_params)
goto alloc_err;
+ qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
+ GFP_KERNEL);
+ if (!qm_info->wfq_data)
+ goto alloc_err;
+
vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
/* First init per-TC PQs */
for (i = 0; i < multi_cos_tcs; i++) {
- struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
-
- params->vport_id = vport_id;
- params->tc_id = p_hwfn->hw_info.non_offload_tc;
- params->wrr_group = 1;
+ struct init_qm_pq_params *params =
+ &qm_info->qm_pq_params[curr_queue++];
+
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH) {
+ params->vport_id = vport_id;
+ params->tc_id = p_hwfn->hw_info.non_offload_tc;
+ params->wrr_group = 1;
+ } else {
+ params->vport_id = vport_id;
+ params->tc_id = p_hwfn->hw_info.offload_tc;
+ params->wrr_group = 1;
+ }
}
/* Then init pure-LB PQ */
- qm_info->pure_lb_pq = i;
- qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
- qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
- qm_info->qm_pq_params[i].wrr_group = 1;
- i++;
+ qm_info->pure_lb_pq = curr_queue;
+ qm_info->qm_pq_params[curr_queue].vport_id =
+ (u8) RESC_START(p_hwfn, QED_VPORT);
+ qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
qm_info->offload_pq = 0;
+ /* Then init per-VF PQs */
+ vf_offset = curr_queue;
+ for (i = 0; i < num_vfs; i++) {
+ /* First vport is used by the PF */
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
+ qm_info->qm_pq_params[curr_queue].tc_id =
+ p_hwfn->hw_info.non_offload_tc;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
+ }
+
+ qm_info->vf_queues_offset = vf_offset;
qm_info->num_pqs = num_pqs;
qm_info->num_vports = num_vports;
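/* For the physical-queue layout built above, a small worked example with
 * assumed counts: with multi_cos_tcs = 1 and num_vfs = 2,
 *   PQ 0        - per-TC PF queue
 *   PQ 1        - pure-LB queue (pure_lb_pq = 1)
 *   PQ 2, PQ 3  - per-VF queues (vf_queues_offset = 2, num_vf_pqs = 2)
 * so num_pqs = multi_cos_tcs + num_vfs + 1 = 4.
 */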
@@ -211,29 +258,91 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
- qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
+ qm_info->num_vf_pqs = num_vfs;
+ qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+
+ for (i = 0; i < qm_info->num_vports; i++)
+ qm_info->qm_vport_params[i].vport_wfq = 1;
qm_info->pf_wfq = 0;
qm_info->pf_rl = 0;
qm_info->vport_rl_en = 1;
+ qm_info->vport_wfq_en = 1;
return 0;
alloc_err:
DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
- kfree(qm_info->qm_pq_params);
- kfree(qm_info->qm_vport_params);
- kfree(qm_info->qm_port_params);
-
+ qed_qm_info_free(p_hwfn);
return -ENOMEM;
}
+/* This function reconfigures the QM pf on the fly.
+ * For this purpose we:
+ * 1. reconfigure the QM database
+ * 2. set new values in the runtime array
+ * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
+ * 4. activate init tool in QM_PF stage
+ * 5. send an sdm_qm_cmd through rbc interface to release the QM
+ */
+int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ bool b_rc;
+ int rc;
+
+ /* qm_info is allocated in qed_init_qm_info() which is already called
+ * from qed_resc_alloc() or previous call of qed_qm_reconf().
+ * The allocated size may change each init, so we free it before next
+ * allocation.
+ */
+ qed_qm_info_free(p_hwfn);
+
+ /* initialize qed's qm data structure */
+ rc = qed_init_qm_info(p_hwfn);
+ if (rc)
+ return rc;
+
+ /* stop PF's qm queues */
+ spin_lock_bh(&qm_lock);
+ b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
+ qm_info->start_pq, qm_info->num_pqs);
+ spin_unlock_bh(&qm_lock);
+ if (!b_rc)
+ return -EINVAL;
+
+ /* clear the QM_PF runtime phase leftovers from previous init */
+ qed_init_clear_rt_data(p_hwfn);
+
+ /* prepare QM portion of runtime array */
+ qed_qm_init_pf(p_hwfn);
+
+ /* activate init tool on runtime array */
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
+ p_hwfn->hw_info.hw_mode);
+ if (rc)
+ return rc;
+
+ /* start PF's qm queues */
+ spin_lock_bh(&qm_lock);
+ b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
+ qm_info->start_pq, qm_info->num_pqs);
+ spin_unlock_bh(&qm_lock);
+ if (!b_rc)
+ return -EINVAL;
+
+ return 0;
+}
+
int qed_resc_alloc(struct qed_dev *cdev)
{
struct qed_consq *p_consq;
struct qed_eq *p_eq;
int i, rc = 0;
+ if (IS_VF(cdev))
+ return rc;
+
cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
if (!cdev->fw_data)
return -ENOMEM;
@@ -308,6 +417,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (rc)
goto alloc_err;
+ rc = qed_iov_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
/* EQ */
p_eq = qed_eq_alloc(p_hwfn, 256);
if (!p_eq) {
@@ -330,6 +443,14 @@ int qed_resc_alloc(struct qed_dev *cdev)
"Failed to allocate memory for dmae_info structure\n");
goto alloc_err;
}
+
+ /* DCBX initialization */
+ rc = qed_dcbx_info_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate memory for dcbx structure\n");
+ goto alloc_err;
+ }
}
cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
@@ -350,6 +471,9 @@ void qed_resc_setup(struct qed_dev *cdev)
{
int i;
+ if (IS_VF(cdev))
+ return;
+
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -365,14 +489,15 @@ void qed_resc_setup(struct qed_dev *cdev)
p_hwfn->mcp_info->mfw_mb_length);
qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+
+ qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
}
}
#define FINAL_CLEANUP_POLL_CNT (100)
#define FINAL_CLEANUP_POLL_TIME (10)
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 id)
+ struct qed_ptt *p_ptt, u16 id, bool is_vf)
{
u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
int rc = -EBUSY;
@@ -380,6 +505,9 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
addr = GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
+ if (is_vf)
+ id += 0x10;
+
command |= X_FINAL_CLEANUP_AGG_INT <<
SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
@@ -492,7 +620,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_common_rt_init_params params;
struct qed_dev *cdev = p_hwfn->cdev;
+ u32 concrete_fid;
int rc = 0;
+ u8 vf_id;
qed_init_cau_rt_data(cdev);
@@ -542,6 +672,14 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, 0x20b4,
qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
+ for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
+ concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
+ qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+ }
+ /* pretend to original PF */
+ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+
return rc;
}
@@ -558,6 +696,7 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
+ struct qed_tunn_start_params *p_tunn,
int hw_mode,
bool b_hw_start,
enum qed_int_mode int_mode,
@@ -574,7 +713,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
/* Update rate limit once we'll actually have a link */
- p_hwfn->qm_info.pf_rl = 100;
+ p_hwfn->qm_info.pf_rl = 100000;
}
qed_cxt_hw_init_pf(p_hwfn);
@@ -603,7 +742,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
/* Cleanup chip from previous driver if such remains exist */
- rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
+ rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
if (rc != 0)
return rc;
@@ -625,7 +764,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
/* send function start command */
- rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
+ rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
+ allow_npar_tx_switch);
if (rc)
DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
}
@@ -672,6 +812,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
}
int qed_hw_init(struct qed_dev *cdev,
+ struct qed_tunn_start_params *p_tunn,
bool b_hw_start,
enum qed_int_mode int_mode,
bool allow_npar_tx_switch,
@@ -680,13 +821,20 @@ int qed_hw_init(struct qed_dev *cdev,
u32 load_code, param;
int rc, mfw_rc, i;
- rc = qed_init_fw_data(cdev, bin_fw_data);
- if (rc != 0)
- return rc;
+ if (IS_PF(cdev)) {
+ rc = qed_init_fw_data(cdev, bin_fw_data);
+ if (rc != 0)
+ return rc;
+ }
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ if (IS_VF(cdev)) {
+ p_hwfn->b_int_enabled = 1;
+ continue;
+ }
+
/* Enable DMAE in PXP */
rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
@@ -708,6 +856,11 @@ int qed_hw_init(struct qed_dev *cdev,
p_hwfn->first_on_engine = (load_code ==
FW_MSG_CODE_DRV_LOAD_ENGINE);
+ if (!qm_lock_init) {
+ spin_lock_init(&qm_lock);
+ qm_lock_init = true;
+ }
+
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -724,7 +877,7 @@ int qed_hw_init(struct qed_dev *cdev,
/* Fall into */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
- p_hwfn->hw_info.hw_mode,
+ p_tunn, p_hwfn->hw_info.hw_mode,
b_hw_start, int_mode,
allow_npar_tx_switch);
break;
@@ -749,6 +902,20 @@ int qed_hw_init(struct qed_dev *cdev,
return mfw_rc;
}
+ /* send DCBX attention request command */
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DCB,
+ "sending phony dcbx set command to trigger DCBx attention handling\n");
+ mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
+ &load_code, &param);
+ if (mfw_rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send DCBX attention request\n");
+ return mfw_rc;
+ }
+
p_hwfn->hw_init_done = true;
}
@@ -811,6 +978,11 @@ int qed_hw_stop(struct qed_dev *cdev)
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
+ if (IS_VF(cdev)) {
+ qed_vf_pf_int_cleanup(p_hwfn);
+ continue;
+ }
+
/* mark the hw as uninitialized... */
p_hwfn->hw_init_done = false;
@@ -842,15 +1014,16 @@ int qed_hw_stop(struct qed_dev *cdev)
usleep_range(1000, 2000);
}
- /* Disable DMAE in PXP - in CMT, this should only be done for
- * first hw-function, and only after all transactions have
- * stopped for all active hw-functions.
- */
- t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
- cdev->hwfns[0].p_main_ptt,
- false);
- if (t_rc != 0)
- rc = t_rc;
+ if (IS_PF(cdev)) {
+ /* Disable DMAE in PXP - in CMT, this should only be done for
+ * first hw-function, and only after all transactions have
+ * stopped for all active hw-functions.
+ */
+ t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
+ cdev->hwfns[0].p_main_ptt, false);
+ if (t_rc != 0)
+ rc = t_rc;
+ }
return rc;
}
@@ -861,7 +1034,12 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
for_each_hwfn(cdev, j) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
- struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ if (IS_VF(cdev)) {
+ qed_vf_pf_int_cleanup(p_hwfn);
+ continue;
+ }
DP_VERBOSE(p_hwfn,
NETIF_MSG_IFDOWN,
@@ -885,6 +1063,9 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
+ if (IS_VF(p_hwfn->cdev))
+ return;
+
/* Re-open incoming traffic */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
@@ -914,6 +1095,13 @@ int qed_hw_reset(struct qed_dev *cdev)
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ if (IS_VF(cdev)) {
+ rc = qed_vf_pf_reset(p_hwfn);
+ if (rc)
+ return rc;
+ continue;
+ }
+
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
/* Check for incorrect states */
@@ -1009,13 +1197,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
u32 *resc_start = p_hwfn->hw_info.resc_start;
+ u8 num_funcs = p_hwfn->num_funcs_on_engine;
u32 *resc_num = p_hwfn->hw_info.resc_num;
struct qed_sb_cnt_info sb_cnt_info;
- int num_funcs, i;
-
- num_funcs = MAX_NUM_PFS_BB;
+ int i, max_vf_vlan_filters;
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+
+#ifdef CONFIG_QED_SRIOV
+ max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS;
+#else
+ max_vf_vlan_filters = 0;
+#endif
+
qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
resc_num[QED_SB] = min_t(u32,
@@ -1220,6 +1414,51 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
+static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 reg_function_hide, tmp, eng_mask;
+ u8 num_funcs;
+
+ num_funcs = MAX_NUM_PFS_BB;
+
+ /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
+ * in the other bits are selected.
+ * Bits 1-15 are for functions 1-15, respectively, and their value is
+ * '0' only for enabled functions (function 0 always exists and
+ * enabled).
+ * In case of CMT, only the "even" functions are enabled, and thus the
+ * number of functions for both hwfns is learnt from the same bits.
+ */
+ reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
+
+ if (reg_function_hide & 0x1) {
+ if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
+ num_funcs = 0;
+ eng_mask = 0xaaaa;
+ } else {
+ num_funcs = 1;
+ eng_mask = 0x5554;
+ }
+
+ /* Get the number of the enabled functions on the engine */
+ tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
+ while (tmp) {
+ if (tmp & 0x1)
+ num_funcs++;
+ tmp >>= 0x1;
+ }
+ }
+
+ p_hwfn->num_funcs_on_engine = num_funcs;
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_PROBE,
+ "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
+ p_hwfn->rel_pf_id,
+ p_hwfn->abs_pf_id,
+ p_hwfn->num_funcs_on_engine);
+}
+
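+/* A worked example for the function-hide decoding above, with an assumed
+ * register value on a single-hwfn, path-0 device:
+ *   reg_function_hide = 0x00000ff1
+ *   bit 0 set  -> the bypass values are valid, start at num_funcs = 1
+ *   eng_mask   = 0x5554 (even functions share this engine)
+ *   tmp        = ~0x00000ff1 & 0x5554 = 0x5004 -> 3 bits set
+ *   num_funcs  = 1 + 3 = 4 enabled functions on the engine
+ */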
static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -1228,6 +1467,13 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
u32 port_mode;
int rc;
+ /* Since all information is common, only first hwfns should do this */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_iov_hw_info(p_hwfn);
+ if (rc)
+ return rc;
+ }
+
/* Read the port mode */
port_mode = qed_rd(p_hwfn, p_ptt,
CNIG_REG_NW_PORT_MODE_BB_B0);
@@ -1271,6 +1517,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.personality = protocol;
}
+ qed_get_num_funcs(p_hwfn, p_ptt);
+
qed_hw_get_resc(p_hwfn);
return rc;
@@ -1336,6 +1584,9 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
p_hwfn->regview = p_regview;
p_hwfn->doorbells = p_doorbells;
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_hw_prepare(p_hwfn);
+
/* Validate that chip access is feasible */
if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
DP_ERR(p_hwfn,
@@ -1387,6 +1638,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
return rc;
err2:
+ if (IS_LEAD_HWFN(p_hwfn))
+ qed_iov_free_hw_info(p_hwfn->cdev);
qed_mcp_free(p_hwfn);
err1:
qed_hw_hwfn_free(p_hwfn);
@@ -1401,7 +1654,8 @@ int qed_hw_prepare(struct qed_dev *cdev,
int rc;
/* Store the precompiled init data ptrs */
- qed_init_iro_array(cdev);
+ if (IS_PF(cdev))
+ qed_init_iro_array(cdev);
/* Initialize the first hwfn - will learn number of hwfns */
rc = qed_hw_prepare_single(p_hwfn,
@@ -1433,9 +1687,11 @@ int qed_hw_prepare(struct qed_dev *cdev,
 * initialized hwfn 0.
*/
if (rc) {
- qed_init_free(p_hwfn);
- qed_mcp_free(p_hwfn);
- qed_hw_hwfn_free(p_hwfn);
+ if (IS_PF(cdev)) {
+ qed_init_free(p_hwfn);
+ qed_mcp_free(p_hwfn);
+ qed_hw_hwfn_free(p_hwfn);
+ }
}
}
@@ -1449,10 +1705,17 @@ void qed_hw_remove(struct qed_dev *cdev)
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ if (IS_VF(cdev)) {
+ qed_vf_pf_release(p_hwfn);
+ continue;
+ }
+
qed_init_free(p_hwfn);
qed_hw_hwfn_free(p_hwfn);
qed_mcp_free(p_hwfn);
}
+
+ qed_iov_free_hw_info(cdev);
}
int qed_chain_alloc(struct qed_dev *cdev,
@@ -1593,3 +1856,388 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
return 0;
}
+
+/* Calculate final WFQ values for all vports and configure them.
+ * After this configuration each vport will have
+ * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
+ */
+static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ struct init_qm_vport_params *vport_params;
+ int i;
+
+ vport_params = p_hwfn->qm_info.qm_vport_params;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+
+ vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
+ min_pf_rate;
+ qed_init_vport_wfq(p_hwfn, p_ptt,
+ vport_params[i].first_tx_pq_id,
+ vport_params[i].vport_wfq);
+ }
+}
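
A quick arithmetic illustration of the weight computed above; the rates are made up, and QED_WFQ_UNIT is assumed to be the driver's percentage base (100), consistent with the "one percent" wording in the validation comments further down in this hunk:

#include <stdio.h>

/* Assumption: min_pf_rate / QED_WFQ_UNIT is "one percent" of the PF rate. */
#define QED_WFQ_UNIT 100

int main(void)
{
	unsigned int min_pf_rate = 10000;	/* PF min rate, Mbps (made up)    */
	unsigned int wfq_speed = 2500;		/* vport min rate, Mbps (made up) */
	unsigned int vport_wfq = (wfq_speed * QED_WFQ_UNIT) / min_pf_rate;

	/* Reversing the relation recovers the approximate guarantee:
	 * min_pf_rate * (vport_wfq / QED_WFQ_UNIT) = 10000 * 25 / 100 = 2500.
	 */
	printf("vport_wfq weight = %u\n", vport_wfq);	/* 25 */
	return 0;
}
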
+
+static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
+				       u32 min_pf_rate)
+{
+ int i;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
+ p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+}
+
+static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ struct init_qm_vport_params *vport_params;
+ int i;
+
+ vport_params = p_hwfn->qm_info.qm_vport_params;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ qed_init_wfq_default_param(p_hwfn, min_pf_rate);
+ qed_init_vport_wfq(p_hwfn, p_ptt,
+ vport_params[i].first_tx_pq_id,
+ vport_params[i].vport_wfq);
+ }
+}
+
+/* This function performs several validations for WFQ
+ * configuration and required min rate for a given vport
+ * 1. req_rate must be greater than one percent of min_pf_rate.
+ * 2. req_rate should not cause the rates of other vports [those not explicitly
+ *    configured for WFQ] to drop below one percent of min_pf_rate.
+ * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
+ */
+static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
+ u16 vport_id, u32 req_rate,
+ u32 min_pf_rate)
+{
+ u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
+ int non_requested_count = 0, req_count = 0, i, num_vports;
+
+ num_vports = p_hwfn->qm_info.num_vports;
+
+ /* Accounting for the vports which are configured for WFQ explicitly */
+ for (i = 0; i < num_vports; i++) {
+ u32 tmp_speed;
+
+ if ((i != vport_id) &&
+ p_hwfn->qm_info.wfq_data[i].configured) {
+ req_count++;
+ tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+ total_req_min_rate += tmp_speed;
+ }
+ }
+
+ /* Include current vport data as well */
+ req_count++;
+ total_req_min_rate += req_rate;
+ non_requested_count = num_vports - req_count;
+
+ if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+ vport_id, req_rate, min_pf_rate);
+ return -EINVAL;
+ }
+
+ if (num_vports > QED_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Number of vports is greater than %d\n",
+ QED_WFQ_UNIT);
+ return -EINVAL;
+ }
+
+ if (total_req_min_rate > min_pf_rate) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
+ total_req_min_rate, min_pf_rate);
+ return -EINVAL;
+ }
+
+ total_left_rate = min_pf_rate - total_req_min_rate;
+
+ left_rate_per_vp = total_left_rate / non_requested_count;
+ if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+ left_rate_per_vp, min_pf_rate);
+ return -EINVAL;
+ }
+
+ p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
+ p_hwfn->qm_info.wfq_data[vport_id].configured = true;
+
+ for (i = 0; i < num_vports; i++) {
+ if (p_hwfn->qm_info.wfq_data[i].configured)
+ continue;
+
+ p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
+ }
+
+ return 0;
+}
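
The three checks above can also be restated as a self-contained predicate. The helper below is only a sketch: the names, the percentage base and the sample numbers are assumptions, and it presumes at least one vport is left unconfigured, as the division in qed_init_wfq_param() also does.

#include <stdbool.h>
#include <stdio.h>

#define WFQ_UNIT 100	/* stand-in for QED_WFQ_UNIT (percentage base) */

/* 'other_req_sum' is the sum of min rates already configured on other vports. */
static bool wfq_request_ok(unsigned int req_rate, unsigned int min_pf_rate,
			   unsigned int other_req_sum,
			   unsigned int num_vports,
			   unsigned int non_requested_count)
{
	unsigned int one_percent = min_pf_rate / WFQ_UNIT;
	unsigned int total_req = other_req_sum + req_rate;

	if (req_rate < one_percent)		/* rule 1 */
		return false;
	if (num_vports > WFQ_UNIT)		/* weights would drop below 1 */
		return false;
	if (total_req > min_pf_rate)		/* rule 3 */
		return false;

	/* rule 2: the leftover bandwidth, spread over the vports that did not
	 * ask for anything, must still amount to one percent each.
	 */
	return (min_pf_rate - total_req) / non_requested_count >= one_percent;
}

int main(void)
{
	/* 10G PF minimum, another vport already holds 4G, 4 vports total,
	 * 2 of them not explicitly configured.
	 */
	printf("%d\n", wfq_request_ok(2500, 10000, 4000, 4, 2)); /* 1: accepted */
	printf("%d\n", wfq_request_ok(50, 10000, 4000, 4, 2));   /* 0: rule 1 */
	return 0;
}
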
+
+static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
+{
+ struct qed_mcp_link_state *p_link;
+ int rc = 0;
+
+ p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;
+
+ if (!p_link->min_pf_rate) {
+ p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
+ p_hwfn->qm_info.wfq_data[vp_id].configured = true;
+ return rc;
+ }
+
+ rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
+
+ if (rc == 0)
+ qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
+ p_link->min_pf_rate);
+ else
+ DP_NOTICE(p_hwfn,
+ "Validation failed while configuring min rate\n");
+
+ return rc;
+}
+
+static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ bool use_wfq = false;
+ int rc = 0;
+ u16 i;
+
+ /* Validate all pre configured vports for wfq */
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ u32 rate;
+
+ if (!p_hwfn->qm_info.wfq_data[i].configured)
+ continue;
+
+ rate = p_hwfn->qm_info.wfq_data[i].min_speed;
+ use_wfq = true;
+
+ rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "WFQ validation failed while configuring min rate\n");
+ break;
+ }
+ }
+
+ if (!rc && use_wfq)
+ qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+ else
+ qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+
+ return rc;
+}
+
+/* Main API for qed clients to configure a vport's min rate.
+ * vp_id - vport id within the PF, in the range [0 - (total_num_vports_per_pf - 1)]
+ * rate - speed in Mbps to be assigned to the given vport.
+ */
+int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
+{
+ int i, rc = -EINVAL;
+
+ /* Currently not supported; Might change in future */
+ if (cdev->num_hwfns > 1) {
+ DP_NOTICE(cdev,
+ "WFQ configuration is not supported for this device\n");
+ return rc;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_ptt *p_ptt;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
+
+ if (!rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+
+ return rc;
+}
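
A minimal caller-side fragment for the exported API above; illustrative only, assuming the driver's own headers and a valid cdev obtained elsewhere:

/* Ask for roughly a 2.5 Gbps minimum for vport 3 of this PF; the values are
 * hypothetical. Returns 0 on success or the errno values seen above.
 */
static int example_set_vport_min_rate(struct qed_dev *cdev)
{
	return qed_configure_vport_wfq(cdev, 3 /* vp_id */, 2500 /* Mbps */);
}
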
+
+/* API to configure WFQ from mcp link change */
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ __qed_configure_vp_wfq_on_link_change(p_hwfn,
+ p_hwfn->p_dpc_ptt,
+ min_pf_rate);
+ }
+}
+
+int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 max_bw)
+{
+ int rc = 0;
+
+ p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
+
+ if (!p_link->line_speed && (max_bw != 100))
+ return rc;
+
+ p_link->speed = (p_link->line_speed * max_bw) / 100;
+ p_hwfn->qm_info.pf_rl = p_link->speed;
+
+	/* Since the limiter also affects Tx-switched traffic, we don't want it
+	 * to limit such traffic in case there's no actual limit.
+	 * In that case, set the limit to an arbitrarily high boundary.
+	 */
+ if (max_bw == 100)
+ p_hwfn->qm_info.pf_rl = 100000;
+
+ rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_hwfn->qm_info.pf_rl);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Configured MAX bandwidth to be %08x Mb/sec\n",
+ p_link->speed);
+
+ return rc;
+}
+
+/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
+int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
+{
+ int i, rc = -EINVAL;
+
+ if (max_bw < 1 || max_bw > 100) {
+ DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
+ return rc;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+ struct qed_mcp_link_state *p_link;
+ struct qed_ptt *p_ptt;
+
+ p_link = &p_lead->mcp_info->link_output;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+ p_link, max_bw);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 min_bw)
+{
+ int rc = 0;
+
+ p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
+ p_hwfn->qm_info.pf_wfq = min_bw;
+
+ if (!p_link->line_speed)
+ return rc;
+
+ p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
+
+ rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Configured MIN bandwidth to be %d Mb/sec\n",
+ p_link->min_pf_rate);
+
+ return rc;
+}
+
+/* Main API to configure PF min bandwidth where bw range is [1-100] */
+int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
+{
+ int i, rc = -EINVAL;
+
+ if (min_bw < 1 || min_bw > 100) {
+ DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
+ return rc;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+ struct qed_mcp_link_state *p_link;
+ struct qed_ptt *p_ptt;
+
+ p_link = &p_lead->mcp_info->link_output;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+ p_link, min_bw);
+ if (rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+ }
+
+ if (p_link->min_pf_rate) {
+ u32 min_rate = p_link->min_pf_rate;
+
+ rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
+ p_ptt,
+ min_rate);
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+
+ return rc;
+}
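
Similarly, a hypothetical caller could combine the two percentage-based helpers above; a sketch assuming the driver headers and a valid cdev:

/* Cap the PF at 40% of line speed and guarantee it at least 10%.
 * Both helpers take a percentage in [1, 100] and return 0 on success.
 */
static int example_set_pf_bandwidth(struct qed_dev *cdev)
{
	int rc = qed_configure_pf_max_bandwidth(cdev, 40);

	if (rc)
		return rc;
	return qed_configure_pf_min_bandwidth(cdev, 10);
}
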
+
+void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_link_state *p_link;
+
+ p_link = &p_hwfn->mcp_info->link_output;
+
+ if (p_link->min_pf_rate)
+ qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
+ p_link->min_pf_rate);
+
+ memset(p_hwfn->qm_info.wfq_data, 0,
+ sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index d6c7ddf..dde364d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -62,6 +62,7 @@ void qed_resc_setup(struct qed_dev *cdev);
* @brief qed_hw_init -
*
* @param cdev
+ * @param p_tunn
* @param b_hw_start
* @param int_mode - interrupt mode [msix, inta, etc.] to use.
* @param allow_npar_tx_switch - npar tx switching to be used
@@ -72,6 +73,7 @@ void qed_resc_setup(struct qed_dev *cdev);
* @return int
*/
int qed_hw_init(struct qed_dev *cdev,
+ struct qed_tunn_start_params *p_tunn,
bool b_hw_start,
enum qed_int_mode int_mode,
bool allow_npar_tx_switch,
@@ -180,11 +182,15 @@ enum qed_dmae_address_type_t {
* used mostly to write a zeroed buffer to destination address
* using DMA
*/
-#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
-#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
+#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
+#define QED_DMAE_FLAG_VF_SRC 0x00000002
+#define QED_DMAE_FLAG_VF_DST 0x00000004
+#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
struct qed_dmae_params {
- u32 flags; /* consists of QED_DMAE_FLAG_* values */
+ u32 flags; /* consists of QED_DMAE_FLAG_* values */
+ u8 src_vfid;
+ u8 dst_vfid;
};
/**
@@ -207,6 +213,23 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
u32 flags);
/**
+ * @brief qed_dmae_host2host - copy data from a source address
+ *        to a destination address (for SRIOV) using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param params
+ */
+int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords, struct qed_dmae_params *p_params);
+
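A hypothetical use of the new host-to-host DMAE helper together with the VF flags added above; this is a sketch only, assuming the driver headers, an acquired ptt and DMA-mapped buffers:

/* Illustrative fragment: copy 'len_dw' dwords from a PF-owned buffer into a
 * VF-owned buffer by flagging the destination as a VF.
 */
static int example_copy_to_vf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			      dma_addr_t src, dma_addr_t dst,
			      u32 len_dw, u8 vf_id)
{
	struct qed_dmae_params params;

	memset(&params, 0, sizeof(params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = vf_id;

	return qed_dmae_host2host(p_hwfn, p_ptt, src, dst, len_dw, &params);
}
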
+/**
* @brief qed_chain_alloc - Allocate and initialize a chain
*
* @param p_hwfn
@@ -280,11 +303,11 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
* @param p_hwfn
* @param p_ptt
* @param id - For PF, engine-relative. For VF, PF-relative.
+ * @param is_vf - true iff cleanup is made for a VF.
*
* @return int
*/
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 id);
+ struct qed_ptt *p_ptt, u16 id, bool is_vf);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index a368f5e..9afc15f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -29,9 +29,9 @@ struct qed_ptt;
enum common_event_opcode {
COMMON_EVENT_PF_START,
COMMON_EVENT_PF_STOP,
- COMMON_EVENT_RESERVED,
- COMMON_EVENT_RESERVED2,
- COMMON_EVENT_RESERVED3,
+ COMMON_EVENT_VF_START,
+ COMMON_EVENT_VF_STOP,
+ COMMON_EVENT_VF_PF_CHANNEL,
COMMON_EVENT_RESERVED4,
COMMON_EVENT_RESERVED5,
COMMON_EVENT_RESERVED6,
@@ -44,9 +44,9 @@ enum common_ramrod_cmd_id {
COMMON_RAMROD_UNUSED,
COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
- COMMON_RAMROD_RESERVED,
- COMMON_RAMROD_RESERVED2,
- COMMON_RAMROD_RESERVED3,
+ COMMON_RAMROD_VF_START,
+ COMMON_RAMROD_VF_STOP,
+ COMMON_RAMROD_PF_UPDATE,
COMMON_RAMROD_EMPTY,
MAX_COMMON_RAMROD_CMD_ID
};
@@ -573,6 +573,14 @@ union event_ring_element {
struct event_ring_next_addr next_addr;
};
+struct mstorm_non_trigger_vf_zone {
+ struct eth_mstorm_per_queue_stat eth_queue_stat;
+};
+
+struct mstorm_vf_zone {
+ struct mstorm_non_trigger_vf_zone non_trigger;
+};
+
enum personality_type {
BAD_PERSONALITY_TYP,
PERSONALITY_RESERVED,
@@ -626,6 +634,59 @@ struct pf_start_ramrod_data {
u8 reserved0[4];
};
+/* Data for port update ramrod */
+struct protocol_dcb_data {
+ u8 dcb_enable_flag;
+ u8 dcb_priority;
+ u8 dcb_tc;
+ u8 reserved;
+};
+
+/* tunnel configuration */
+struct pf_update_tunnel_config {
+ u8 update_rx_pf_clss;
+ u8 update_tx_pf_clss;
+ u8 set_vxlan_udp_port_flg;
+ u8 set_geneve_udp_port_flg;
+ u8 tx_enable_vxlan;
+ u8 tx_enable_l2geneve;
+ u8 tx_enable_ipgeneve;
+ u8 tx_enable_l2gre;
+ u8 tx_enable_ipgre;
+ u8 tunnel_clss_vxlan;
+ u8 tunnel_clss_l2geneve;
+ u8 tunnel_clss_ipgeneve;
+ u8 tunnel_clss_l2gre;
+ u8 tunnel_clss_ipgre;
+ __le16 vxlan_udp_port;
+ __le16 geneve_udp_port;
+ __le16 reserved[3];
+};
+
+struct pf_update_ramrod_data {
+ u8 pf_id;
+ u8 update_eth_dcb_data_flag;
+ u8 update_fcoe_dcb_data_flag;
+ u8 update_iscsi_dcb_data_flag;
+ u8 update_roce_dcb_data_flag;
+ u8 update_mf_vlan_flag;
+ __le16 mf_vlan;
+ struct protocol_dcb_data eth_dcb_data;
+ struct protocol_dcb_data fcoe_dcb_data;
+ struct protocol_dcb_data iscsi_dcb_data;
+ struct protocol_dcb_data roce_dcb_data;
+ struct pf_update_tunnel_config tunnel_config;
+};
+
+/* Tunnel classification scheme */
+enum tunnel_clss {
+ TUNNEL_CLSS_MAC_VLAN = 0,
+ TUNNEL_CLSS_MAC_VNI,
+ TUNNEL_CLSS_INNER_MAC_VLAN,
+ TUNNEL_CLSS_INNER_MAC_VNI,
+ MAX_TUNNEL_CLSS
+};
+
enum ports_mode {
ENGX2_PORTX1 /* 2 engines x 1 port */,
ENGX2_PORTX2 /* 2 engines x 2 ports */,
@@ -635,6 +696,16 @@ enum ports_mode {
MAX_PORTS_MODE
};
+struct pstorm_non_trigger_vf_zone {
+ struct eth_pstorm_per_queue_stat eth_queue_stat;
+ struct regpair reserved[2];
+};
+
+struct pstorm_vf_zone {
+ struct pstorm_non_trigger_vf_zone non_trigger;
+ struct regpair reserved[7];
+};
+
/* Ramrod Header of SPQE */
struct ramrod_header {
__le32 cid /* Slowpath Connection CID */;
@@ -664,6 +735,36 @@ struct tstorm_per_port_stat {
struct regpair preroce_irregular_pkt;
};
+struct ustorm_non_trigger_vf_zone {
+ struct eth_ustorm_per_queue_stat eth_queue_stat;
+ struct regpair vf_pf_msg_addr;
+};
+
+struct ustorm_trigger_vf_zone {
+ u8 vf_pf_msg_valid;
+ u8 reserved[7];
+};
+
+struct ustorm_vf_zone {
+ struct ustorm_non_trigger_vf_zone non_trigger;
+ struct ustorm_trigger_vf_zone trigger;
+};
+
+struct vf_start_ramrod_data {
+ u8 vf_id;
+ u8 enable_flr_ack;
+ __le16 opaque_fid;
+ u8 personality;
+ u8 reserved[3];
+};
+
+struct vf_stop_ramrod_data {
+ u8 vf_id;
+ u8 reserved0;
+ __le16 reserved1;
+ __le32 reserved2;
+};
+
struct atten_status_block {
__le32 atten_bits;
__le32 atten_ack;
@@ -990,7 +1091,7 @@ enum init_phases {
PHASE_ENGINE,
PHASE_PORT,
PHASE_PF,
- PHASE_RESERVED,
+ PHASE_VF,
PHASE_QM_PF,
MAX_INIT_PHASES
};
@@ -1603,6 +1704,19 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
u16 start_pq,
u16 num_pqs);
+void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 dest_port);
+void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool vxlan_enable);
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool eth_gre_enable,
+ bool ip_gre_enable);
+void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 dest_port);
+void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool eth_geneve_enable,
+ bool ip_geneve_enable);
+
/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
@@ -3788,7 +3902,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_SET_LLDP 0x24000000
#define DRV_MSG_CODE_SET_DCBX 0x25000000
-
+#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
#define DRV_MSG_CODE_INITIATE_FLR 0x02000000
@@ -3808,6 +3922,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000
#define DRV_MSG_CODE_SET_VERSION 0x000f0000
+#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
@@ -3865,6 +3980,18 @@ struct public_drv_mb {
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
+#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0
+#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
+#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
+
+#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
+#define DRV_MB_PARAM_BIST_RC_PASSED 1
+#define DRV_MB_PARAM_BIST_RC_FAILED 2
+#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
+
+#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
+#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF
+
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
@@ -5067,4 +5194,8 @@ struct hw_set_image {
struct hw_set_info hw_sets[1];
};
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u8 pf_id, u16 pf_wfq);
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index a95a3e4..0ada7fd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -23,6 +23,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_reg_addr.h"
+#include "qed_sriov.h"
#define QED_BAR_ACQUIRE_TIMEOUT 1000
@@ -236,8 +237,12 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
quota = min_t(size_t, n - done,
PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
- qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
- hw_offset = qed_ptt_get_bar_addr(p_ptt);
+ if (IS_PF(p_hwfn->cdev)) {
+ qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
+ hw_offset = qed_ptt_get_bar_addr(p_ptt);
+ } else {
+ hw_offset = hw_addr + done;
+ }
dw_count = quota / 4;
host_addr = (u32 *)((u8 *)addr + done);
@@ -338,14 +343,25 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn,
*(u32 *)&p_ptt->pxp.pretend);
}
+u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+ u32 concrete_fid = 0;
+
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);
+
+ return concrete_fid;
+}
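
The concrete-FID construction above composes a few mask/shift fields via SET_FIELD. The standalone sketch below mimics that with a generic macro; the field offsets shown are purely illustrative, since the real PXP_CONCRETE_FID_* definitions live in the HSI headers:

#include <stdio.h>
#include <stdint.h>

/* Generic field setter in the spirit of the driver's SET_FIELD macro. */
#define FIELD_SET(val, mask, shift, field_val) \
	((val) | (((uint32_t)(field_val) & (mask)) << (shift)))

int main(void)
{
	uint32_t concrete_fid = 0;

	/* Illustrative layout only: PFID in the low nibble, a VF-valid bit,
	 * and the VF id in a higher byte.
	 */
	concrete_fid = FIELD_SET(concrete_fid, 0xf, 0, 2);	/* PFID = 2 */
	concrete_fid = FIELD_SET(concrete_fid, 0xff, 8, 5);	/* VFID = 5 */
	concrete_fid = FIELD_SET(concrete_fid, 0x1, 7, 1);	/* VFVALID  */

	printf("concrete fid = 0x%08x\n", concrete_fid);	/* 0x00000582 */
	return 0;
}
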
+
/* DMAE */
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
const u8 is_src_type_grc,
const u8 is_dst_type_grc,
struct qed_dmae_params *p_params)
{
+ u16 opcode_b = 0;
u32 opcode = 0;
- u16 opcodeB = 0;
/* Whether the source is the PCIe or the GRC.
* 0- The source is the PCIe
@@ -387,14 +403,24 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
DMAE_CMD_DST_ADDR_RESET_SHIFT);
- opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK <<
- DMAE_CMD_SRC_VF_ID_SHIFT);
+ /* SRC/DST VFID: all 1's - pf, otherwise VF id */
+ if (p_params->flags & QED_DMAE_FLAG_VF_SRC) {
+ opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
+ opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
+ } else {
+ opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
+ DMAE_CMD_SRC_VF_ID_SHIFT;
+ }
- opcodeB |= (DMAE_CMD_DST_VF_ID_MASK <<
- DMAE_CMD_DST_VF_ID_SHIFT);
+ if (p_params->flags & QED_DMAE_FLAG_VF_DST) {
+ opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
+ opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
+ } else {
+ opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
+ }
p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
- p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB);
+ p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
}
u32 qed_dmae_idx_to_go_cmd(u8 idx)
@@ -742,6 +768,28 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
return rc;
}
+int
+qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords, struct qed_dmae_params *p_params)
+{
+ int rc;
+
+ mutex_lock(&(p_hwfn->dmae_info.mutex));
+
+ rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+ dest_addr,
+ QED_DMAE_ADDRESS_HOST_PHYS,
+ QED_DMAE_ADDRESS_HOST_PHYS,
+ size_in_dwords, p_params);
+
+ mutex_unlock(&(p_hwfn->dmae_info.mutex));
+
+ return rc;
+}
+
u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
enum protocol_type proto,
union qed_qm_pq_params *p_params)
@@ -765,6 +813,9 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
break;
case PROTOCOLID_ETH:
pq_id = p_params->eth.tc;
+ if (p_params->eth.is_vf)
+ pq_id += p_hwfn->qm_info.vf_queues_offset +
+ p_params->eth.vf_id;
break;
default:
pq_id = 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index e56d433..4367363 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -221,6 +221,16 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
+ * @brief qed_vfid_to_concrete - build a concrete FID for a
+ * given VF ID
+ *
+ * @param p_hwfn
+ * @param vfid
+ */
+u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid);
+
+/**
* @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd
* this is declared here since other files will require it.
* @param idx
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index f55ebdc..e8a3b9d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -712,6 +712,21 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 pf_id, u16 pf_wfq)
+{
+ u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
+
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+ return -1;
+ }
+
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+ return 0;
+}
+
int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 pf_id,
@@ -732,6 +747,31 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS],
+ u16 vport_wfq)
+{
+ u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
+ u8 tc;
+
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration");
+ return -1;
+ }
+
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ u16 vport_pq_id = first_tx_pq_id[tc];
+
+ if (vport_pq_id != QM_INVALID_PQ_ID)
+ qed_wr(p_hwfn, p_ptt,
+ QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
+ inc_val);
+ }
+
+ return 0;
+}
+
int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 vport_id,
@@ -788,3 +828,130 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
return true;
}
+
+static void
+qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
+{
+ if (enable)
+ set_bit(bit, var);
+ else
+ clear_bit(bit, var);
+}
+
+#define PRS_ETH_TUNN_FIC_FORMAT -188897008
+
+void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 dest_port)
+{
+ qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port);
+ qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
+}
+
+void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool vxlan_enable)
+{
+ unsigned long reg_val = 0;
+ u8 shift;
+
+ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+
+ if (reg_val)
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ PRS_ETH_TUNN_FIC_FORMAT);
+
+ reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+ shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
+ vxlan_enable ? 1 : 0);
+}
+
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ bool eth_gre_enable, bool ip_gre_enable)
+{
+ unsigned long reg_val = 0;
+ u8 shift;
+
+ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
+
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val)
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ PRS_ETH_TUNN_FIC_FORMAT);
+
+ reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+ shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
+
+ shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
+ eth_gre_enable ? 1 : 0);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
+ ip_gre_enable ? 1 : 0);
+}
+
+void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 dest_port)
+{
+ qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+ qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
+}
+
+void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool eth_geneve_enable,
+ bool ip_geneve_enable)
+{
+ unsigned long reg_val = 0;
+ u8 shift;
+
+ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
+
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val)
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ PRS_ETH_TUNN_FIC_FORMAT);
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
+ eth_geneve_enable ? 1 : 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
+
+	/* Program the NGE comp version in NIG, PBF and PRS */
+ reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
+ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
+ qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
+
+ /* EDPM with geneve tunnel not supported in BB_B0 */
+ if (QED_IS_BB_B0(p_hwfn->cdev))
+ return;
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+ eth_geneve_enable ? 1 : 0);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+ ip_geneve_enable ? 1 : 0);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 3269b36..d358c3b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -18,6 +18,7 @@
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
+#include "qed_sriov.h"
#define QED_INIT_MAX_POLL_COUNT 100
#define QED_INIT_POLL_PERIOD_US 500
@@ -128,6 +129,9 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_rt_data *rt_data = &p_hwfn->rt_data;
+ if (IS_VF(p_hwfn->cdev))
+ return 0;
+
rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
GFP_KERNEL);
if (!rt_data->b_valid)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 2017b01..09a6ad3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -26,6 +26,8 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
struct qed_pi_info {
qed_int_comp_cb_t comp_cb;
@@ -2513,6 +2515,9 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
u32 sb_offset;
u32 pi_offset;
+ if (IS_VF(p_hwfn->cdev))
+ return;
+
sb_offset = igu_sb_id * PIS_PER_SB;
memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
@@ -2542,8 +2547,9 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
sb_info->sb_ack = 0;
memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
- qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
- sb_info->igu_sb_id, 0, 0);
+ if (IS_PF(p_hwfn->cdev))
+ qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
+ sb_info->igu_sb_id, 0, 0);
}
/**
@@ -2563,8 +2569,10 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
/* Assuming continuous set of IGU SBs dedicated for given PF */
if (sb_id == QED_SP_SB_ID)
igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
- else
+ else if (IS_PF(p_hwfn->cdev))
igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+ else
+ igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
(sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
@@ -2594,9 +2602,16 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
/* The igu address will hold the absolute address that needs to be
* written to for a specific status block
*/
- sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_IGU_CMD +
- (sb_info->igu_sb_id << 3);
+ if (IS_PF(p_hwfn->cdev)) {
+ sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ (sb_info->igu_sb_id << 3);
+ } else {
+ sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+ PXP_VF_BAR0_START_IGU +
+ ((IGU_CMD_INT_ACK_BASE +
+ sb_info->igu_sb_id) << 3);
+ }
sb_info->flags |= QED_SB_INFO_INIT;
@@ -2783,24 +2798,20 @@ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
{
p_hwfn->b_int_enabled = 0;
+ if (IS_VF(p_hwfn->cdev))
+ return;
+
qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}
#define IGU_CLEANUP_SLEEP_LENGTH (1000)
-void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 sb_id,
- bool cleanup_set,
- u16 opaque_fid
- )
+static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id, bool cleanup_set, u16 opaque_fid)
{
+ u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
- u32 data = 0;
- u32 cmd_ctrl = 0;
- u32 val = 0;
- u32 sb_bit = 0;
- u32 sb_bit_addr = 0;
/* Set the data field */
SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
@@ -2845,11 +2856,9 @@ void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 sb_id,
- u16 opaque,
- bool b_set)
+ u32 sb_id, u16 opaque, bool b_set)
{
- int pi;
+ int pi, i;
/* Set */
if (b_set)
@@ -2858,6 +2867,22 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
/* Clear */
qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+ /* Wait for the IGU SB to cleanup */
+ for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
+ u32 val;
+
+ val = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_WRITE_DONE_PENDING + ((sb_id / 32) * 4));
+ if (val & (1 << (sb_id % 32)))
+ usleep_range(10, 20);
+ else
+ break;
+ }
+ if (i == IGU_CLEANUP_SLEEP_LENGTH)
+ DP_NOTICE(p_hwfn,
+ "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
+ sb_id);
+
/* Clear the CAU for the SB */
for (pi = 0; pi < 12; pi++)
qed_wr(p_hwfn, p_ptt,
@@ -2866,13 +2891,11 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- bool b_set,
- bool b_slowpath)
+ bool b_set, bool b_slowpath)
{
u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
- u32 sb_id = 0;
- u32 val = 0;
+ u32 sb_id = 0, val = 0;
val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
@@ -2888,14 +2911,14 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.opaque_fid,
b_set);
- if (b_slowpath) {
- sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
- "IGU cleaning slowpath SB [%d]\n", sb_id);
- qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
- p_hwfn->hw_info.opaque_fid,
- b_set);
- }
+ if (!b_slowpath)
+ return;
+
+ sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU cleaning slowpath SB [%d]\n", sb_id);
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ p_hwfn->hw_info.opaque_fid, b_set);
}
static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
@@ -2935,9 +2958,9 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct qed_igu_info *p_igu_info;
+ u32 val, min_vf = 0, max_vf = 0;
+ u16 sb_id, last_iov_sb_id = 0;
struct qed_igu_block *blk;
- u32 val;
- u16 sb_id;
u16 prev_sb_id = 0xFF;
p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
@@ -2947,12 +2970,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
p_igu_info = p_hwfn->hw_info.p_igu_info;
- /* Initialize base sb / sb cnt for PFs */
+ /* Initialize base sb / sb cnt for PFs and VFs */
p_igu_info->igu_base_sb = 0xffff;
p_igu_info->igu_sb_cnt = 0;
p_igu_info->igu_dsb_id = 0xffff;
p_igu_info->igu_base_sb_iov = 0xffff;
+ if (p_hwfn->cdev->p_iov_info) {
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+ min_vf = p_iov->first_vf_in_pf;
+ max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
+ }
+
for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
sb_id++) {
blk = &p_igu_info->igu_map.igu_blocks[sb_id];
@@ -2986,14 +3016,43 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
(p_igu_info->igu_sb_cnt)++;
}
}
+ } else {
+ if ((blk->function_id >= min_vf) &&
+ (blk->function_id < max_vf)) {
+ /* Available for VFs of this PF */
+ if (p_igu_info->igu_base_sb_iov == 0xffff) {
+ p_igu_info->igu_base_sb_iov = sb_id;
+ } else if (last_iov_sb_id != sb_id - 1) {
+ if (!val) {
+ DP_VERBOSE(p_hwfn->cdev,
+ NETIF_MSG_INTR,
+ "First uninitialized IGU CAM entry at index 0x%04x\n",
+ sb_id);
+ } else {
+ DP_NOTICE(p_hwfn->cdev,
+ "Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n",
+ p_hwfn->rel_pf_id,
+ last_iov_sb_id,
+						  sb_id);
+				}
+ break;
+ }
+ blk->status |= QED_IGU_STATUS_FREE;
+ p_hwfn->hw_info.p_igu_info->free_blks++;
+ last_iov_sb_id = sb_id;
+ }
}
}
-
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
- "IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
- p_igu_info->igu_base_sb,
- p_igu_info->igu_sb_cnt,
- p_igu_info->igu_dsb_id);
+ p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
+
+	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+		   "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n",
+		   p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov,
+		   p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov,
+		   p_igu_info->igu_dsb_id);
if (p_igu_info->igu_base_sb == 0xffff ||
p_igu_info->igu_dsb_id == 0xffff ||
@@ -3116,6 +3175,23 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
p_sb_cnt_info->sb_free_blk = info->free_blks;
}
+u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+ struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+
+ /* Determine origin of SB id */
+ if ((sb_id >= p_info->igu_base_sb) &&
+ (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
+ return sb_id - p_info->igu_base_sb;
+ } else if ((sb_id >= p_info->igu_base_sb_iov) &&
+ (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
+ return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
+ } else {
+ DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id);
+ return 0;
+ }
+}
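
The range mapping implemented above is easy to restate with made-up IGU numbers; a standalone sketch (the CAM layout used here is hypothetical):

#include <stdio.h>
#include <stdint.h>

/* PF SBs occupy [base, base + cnt) and map to queues [0, cnt); VF ("IOV")
 * SBs follow and map to queues [cnt, cnt + cnt_iov).
 */
static uint16_t queue_id_from_sb_id(uint16_t sb_id,
				    uint16_t base, uint16_t cnt,
				    uint16_t base_iov, uint16_t cnt_iov)
{
	if (sb_id >= base && sb_id < base + cnt)
		return sb_id - base;
	if (sb_id >= base_iov && sb_id < base_iov + cnt_iov)
		return sb_id - base_iov + cnt;
	return 0;	/* out of range for this function */
}

int main(void)
{
	/* PF SBs 16..23, VF SBs 64..79 (hypothetical CAM layout) */
	printf("%u\n", queue_id_from_sb_id(18, 16, 8, 64, 16));	/* 2  */
	printf("%u\n", queue_id_from_sb_id(70, 16, 8, 64, 16));	/* 14 */
	return 0;
}
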
+
void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{
int i;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index c57f2e6..20b4686 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -20,6 +20,12 @@
#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
+/* Fields of IGU VF CONFIGURATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */
+#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
+#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
+#define IGU_VF_CONF_PARENT_MASK (0xF) /* Parent PF */
+#define IGU_VF_CONF_PARENT_SHIFT 5 /* Parent PF */
/* Igu control commands
*/
@@ -292,26 +298,8 @@ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
* @param p_hwfn
* @param p_ptt
* @param sb_id - igu status block id
- * @param cleanup_set - set(1) / clear(0)
- * @param opaque_fid - the function for which to perform
- * cleanup, for example a PF on behalf of
- * its VFs.
- */
-void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 sb_id,
- bool cleanup_set,
- u16 opaque_fid);
-
-/**
- * @brief Status block cleanup. Should be called for each status
- * block that will be used -> both PF / VF
- *
- * @param p_hwfn
- * @param p_ptt
- * @param sb_id - igu status block id
* @param opaque - opaque fid of the sb owner.
- * @param cleanup_set - set(1) / clear(0)
+ * @param b_set - set(1) / clear(0)
*/
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -365,6 +353,16 @@ void qed_int_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
+ * @brief - Returns an Rx queue index appropriate for usage with the given SB.
+ *
+ * @param p_hwfn
+ * @param sb_id - absolute index of SB
+ *
+ * @return index of Rx queue
+ */
+u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+
+/**
* @brief - Enable Interrupt & Attention for hw function
*
* @param p_hwfn
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 3f35c6c..8fba87dd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -31,137 +31,25 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
+#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
-enum qed_rss_caps {
- QED_RSS_IPV4 = 0x1,
- QED_RSS_IPV6 = 0x2,
- QED_RSS_IPV4_TCP = 0x4,
- QED_RSS_IPV6_TCP = 0x8,
- QED_RSS_IPV4_UDP = 0x10,
- QED_RSS_IPV6_UDP = 0x20,
-};
-
-/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
-#define QED_RSS_IND_TABLE_SIZE 128
-#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
-
-struct qed_rss_params {
- u8 update_rss_config;
- u8 rss_enable;
- u8 rss_eng_id;
- u8 update_rss_capabilities;
- u8 update_rss_ind_table;
- u8 update_rss_key;
- u8 rss_caps;
- u8 rss_table_size_log;
- u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
- u32 rss_key[QED_RSS_KEY_SIZE];
-};
-
-enum qed_filter_opcode {
- QED_FILTER_ADD,
- QED_FILTER_REMOVE,
- QED_FILTER_MOVE,
- QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */
- QED_FILTER_FLUSH, /* Removes all filters */
-};
-
-enum qed_filter_ucast_type {
- QED_FILTER_MAC,
- QED_FILTER_VLAN,
- QED_FILTER_MAC_VLAN,
- QED_FILTER_INNER_MAC,
- QED_FILTER_INNER_VLAN,
- QED_FILTER_INNER_PAIR,
- QED_FILTER_INNER_MAC_VNI_PAIR,
- QED_FILTER_MAC_VNI_PAIR,
- QED_FILTER_VNI,
-};
-
-struct qed_filter_ucast {
- enum qed_filter_opcode opcode;
- enum qed_filter_ucast_type type;
- u8 is_rx_filter;
- u8 is_tx_filter;
- u8 vport_to_add_to;
- u8 vport_to_remove_from;
- unsigned char mac[ETH_ALEN];
- u8 assert_on_error;
- u16 vlan;
- u32 vni;
-};
-
-struct qed_filter_mcast {
- /* MOVE is not supported for multicast */
- enum qed_filter_opcode opcode;
- u8 vport_to_add_to;
- u8 vport_to_remove_from;
- u8 num_mc_addrs;
-#define QED_MAX_MC_ADDRS 64
- unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
-};
-
-struct qed_filter_accept_flags {
- u8 update_rx_mode_config;
- u8 update_tx_mode_config;
- u8 rx_accept_filter;
- u8 tx_accept_filter;
-#define QED_ACCEPT_NONE 0x01
-#define QED_ACCEPT_UCAST_MATCHED 0x02
-#define QED_ACCEPT_UCAST_UNMATCHED 0x04
-#define QED_ACCEPT_MCAST_MATCHED 0x08
-#define QED_ACCEPT_MCAST_UNMATCHED 0x10
-#define QED_ACCEPT_BCAST 0x20
-};
-
-struct qed_sp_vport_update_params {
- u16 opaque_fid;
- u8 vport_id;
- u8 update_vport_active_rx_flg;
- u8 vport_active_rx_flg;
- u8 update_vport_active_tx_flg;
- u8 vport_active_tx_flg;
- u8 update_approx_mcast_flg;
- u8 update_accept_any_vlan_flg;
- u8 accept_any_vlan;
- unsigned long bins[8];
- struct qed_rss_params *rss_params;
- struct qed_filter_accept_flags accept_flags;
-};
-
-enum qed_tpa_mode {
- QED_TPA_MODE_NONE,
- QED_TPA_MODE_UNUSED,
- QED_TPA_MODE_GRO,
- QED_TPA_MODE_MAX
-};
-
-struct qed_sp_vport_start_params {
- enum qed_tpa_mode tpa_mode;
- bool remove_inner_vlan;
- bool drop_ttl0;
- u8 max_buffers_per_cqe;
- u32 concrete_fid;
- u16 opaque_fid;
- u8 vport_id;
- u16 mtu;
-};
#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
-static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
- struct qed_sp_vport_start_params *p_params)
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params)
{
struct vport_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
+ u8 abs_vport_id = 0;
int rc = -EINVAL;
u16 rx_mode = 0;
- u8 abs_vport_id = 0;
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != 0)
@@ -211,6 +99,8 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
break;
}
+ p_ramrod->tx_switching_en = p_params->tx_switching;
+
/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
p_params->concrete_fid);
@@ -218,6 +108,21 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params)
+{
+ if (IS_VF(p_hwfn->cdev)) {
+ return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
+ p_params->mtu,
+ p_params->remove_inner_vlan,
+ p_params->tpa_mode,
+ p_params->max_buffers_per_cqe,
+ p_params->only_untagged);
+ }
+
+ return qed_sp_eth_vport_start(p_hwfn, p_params);
+}
+
static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
@@ -363,6 +268,38 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
}
static void
+qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct qed_sge_tpa_params *p_params)
+{
+ struct eth_vport_tpa_param *p_tpa;
+
+ if (!p_params) {
+ p_ramrod->common.update_tpa_param_flg = 0;
+ p_ramrod->common.update_tpa_en_flg = 0;
+ p_ramrod->common.update_tpa_param_flg = 0;
+ return;
+ }
+
+ p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
+ p_tpa = &p_ramrod->tpa_param;
+ p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
+ p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
+ p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
+ p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
+
+ p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
+ p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
+ p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
+ p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
+ p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
+ p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
+ p_tpa->tpa_max_size = p_params->tpa_max_size;
+ p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
+ p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
+}
+
+static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
struct qed_sp_vport_update_params *p_params)
@@ -383,20 +320,24 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
}
}
-static int
-qed_sp_vport_update(struct qed_hwfn *p_hwfn,
- struct qed_sp_vport_update_params *p_params,
- enum spq_mode comp_mode,
- struct qed_spq_comp_cb *p_comp_data)
+int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
{
struct qed_rss_params *p_rss_params = p_params->rss_params;
struct vport_update_ramrod_data_cmn *p_cmn;
struct qed_sp_init_data init_data;
struct vport_update_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
- u8 abs_vport_id = 0;
+ u8 abs_vport_id = 0, val;
int rc = -EINVAL;
+ if (IS_VF(p_hwfn->cdev)) {
+ rc = qed_vf_pf_vport_update(p_hwfn, p_params);
+ return rc;
+ }
+
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != 0)
return rc;
@@ -425,6 +366,27 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
p_cmn->accept_any_vlan = p_params->accept_any_vlan;
p_cmn->update_accept_any_vlan_flg =
p_params->update_accept_any_vlan_flg;
+
+ p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
+ val = p_params->update_inner_vlan_removal_flg;
+ p_cmn->update_inner_vlan_removal_en_flg = val;
+
+ p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
+ val = p_params->update_default_vlan_enable_flg;
+ p_cmn->update_default_vlan_en_flg = val;
+
+ p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
+ p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
+
+ p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
+
+ p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
+ p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
+
+ p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
+ val = p_params->update_anti_spoofing_en_flg;
+ p_ramrod->common.update_anti_spoofing_en_flg = val;
+
rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
if (rc) {
/* Return spq entry which is taken in qed_sp_init_request()*/
@@ -436,12 +398,11 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
+ qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
-static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- u8 vport_id)
+int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
struct vport_stop_ramrod_data *p_ramrod;
struct qed_sp_init_data init_data;
@@ -449,6 +410,9 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
u8 abs_vport_id = 0;
int rc;
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_pf_vport_stop(p_hwfn);
+
rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
if (rc != 0)
return rc;
@@ -470,13 +434,26 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+static int
+qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
+ struct qed_filter_accept_flags *p_accept_flags)
+{
+ struct qed_sp_vport_update_params s_params;
+
+ memset(&s_params, 0, sizeof(s_params));
+ memcpy(&s_params.accept_flags, p_accept_flags,
+ sizeof(struct qed_filter_accept_flags));
+
+ return qed_vf_pf_vport_update(p_hwfn, &s_params);
+}
+
static int qed_filter_accept_cmd(struct qed_dev *cdev,
u8 vport,
struct qed_filter_accept_flags accept_flags,
u8 update_accept_any_vlan,
u8 accept_any_vlan,
- enum spq_mode comp_mode,
- struct qed_spq_comp_cb *p_comp_data)
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
{
struct qed_sp_vport_update_params vport_update_params;
int i, rc;
@@ -493,6 +470,13 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ if (IS_VF(cdev)) {
+ rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
+ if (rc)
+ return rc;
+ continue;
+ }
+
rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
comp_mode, p_comp_data);
if (rc != 0) {
@@ -527,16 +511,14 @@ static int qed_sp_release_queue_cid(
return 0;
}
-static int
-qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct qed_queue_start_common_params *params,
- u8 stats_id,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size)
+int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *params,
+ u8 stats_id,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
struct rx_queue_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -605,8 +587,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size,
- void __iomem **pp_prod)
+ u16 cqe_pbl_size, void __iomem **pp_prod)
{
struct qed_hw_cid_data *p_rx_cid;
u64 init_prod_val = 0;
@@ -614,6 +595,16 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
u8 abs_stats_id = 0;
int rc;
+ if (IS_VF(p_hwfn->cdev)) {
+ return qed_vf_pf_rxq_start(p_hwfn,
+ params->queue_id,
+ params->sb,
+ params->sb_idx,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size, pp_prod);
+ }
+
rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
if (rc != 0)
return rc;
@@ -656,10 +647,59 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
return rc;
}
-static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
- u16 rx_queue_id,
- bool eq_completion_only,
- bool cqe_completion)
+int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ u8 num_rxqs,
+ u8 complete_cqe_flg,
+ u8 complete_event_flg,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct rx_queue_update_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ struct qed_hw_cid_data *p_rx_cid;
+ u16 qid, abs_rx_q_id = 0;
+ int rc = -EINVAL;
+ u8 i;
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ for (i = 0; i < num_rxqs; i++) {
+ qid = rx_queue_id + i;
+ p_rx_cid = &p_hwfn->p_rx_cids[qid];
+
+ /* Get SPQ entry */
+ init_data.cid = p_rx_cid->cid;
+ init_data.opaque_fid = p_rx_cid->opaque_fid;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_RX_QUEUE_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_update;
+
+ qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
+ qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+ p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+ p_ramrod->complete_cqe_flg = complete_cqe_flg;
+ p_ramrod->complete_event_flg = complete_event_flg;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ bool eq_completion_only, bool cqe_completion)
{
struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
@@ -668,6 +708,9 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
u16 abs_rx_q_id = 0;
int rc = -EINVAL;
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion);
+
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_rx_cid->cid;
@@ -703,15 +746,14 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
}
-static int
-qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct qed_queue_start_common_params *p_params,
- u8 stats_id,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- union qed_qm_pq_params *p_pq_params)
+int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *p_params,
+ u8 stats_id,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ union qed_qm_pq_params *p_pq_params)
{
struct tx_queue_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -765,14 +807,21 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
struct qed_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
- u16 pbl_size,
- void __iomem **pp_doorbell)
+ u16 pbl_size, void __iomem **pp_doorbell)
{
struct qed_hw_cid_data *p_tx_cid;
union qed_qm_pq_params pq_params;
u8 abs_stats_id = 0;
int rc;
+ if (IS_VF(p_hwfn->cdev)) {
+ return qed_vf_pf_txq_start(p_hwfn,
+ p_params->queue_id,
+ p_params->sb,
+ p_params->sb_idx,
+ pbl_addr, pbl_size, pp_doorbell);
+ }
+
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
if (rc)
return rc;
@@ -813,14 +862,16 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
return rc;
}
-static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
- u16 tx_queue_id)
+int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
{
struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id);
+
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_tx_cid->cid;
@@ -1016,11 +1067,11 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
return 0;
}
-static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- struct qed_filter_ucast *p_filter_cmd,
- enum spq_mode comp_mode,
- struct qed_spq_comp_cb *p_comp_data)
+int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
{
struct vport_filter_update_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -1118,7 +1169,7 @@ static inline u32 qed_crc32c_le(u32 seed,
return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}
-static u8 qed_mcast_bin_from_mac(u8 *mac)
+u8 qed_mcast_bin_from_mac(u8 *mac)
{
u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
mac, ETH_ALEN);
@@ -1201,11 +1252,10 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
-static int
-qed_filter_mcast_cmd(struct qed_dev *cdev,
- struct qed_filter_mcast *p_filter_cmd,
- enum spq_mode comp_mode,
- struct qed_spq_comp_cb *p_comp_data)
+static int qed_filter_mcast_cmd(struct qed_dev *cdev,
+ struct qed_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
{
int rc = 0;
int i;
@@ -1221,8 +1271,10 @@ qed_filter_mcast_cmd(struct qed_dev *cdev,
u16 opaque_fid;
- if (rc != 0)
- break;
+ if (IS_VF(cdev)) {
+ qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
+ continue;
+ }
opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -1247,8 +1299,10 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
u16 opaque_fid;
- if (rc != 0)
- break;
+ if (IS_VF(cdev)) {
+ rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
+ continue;
+ }
opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -1257,6 +1311,8 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
p_filter_cmd,
comp_mode,
p_comp_data);
+ if (rc != 0)
+ break;
}
return rc;
@@ -1265,12 +1321,19 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
- u32 *p_len,
- u16 statistics_bin)
+ u32 *p_len, u16 statistics_bin)
{
- *p_addr = BAR0_MAP_REG_PSDM_RAM +
- PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
- *p_len = sizeof(struct eth_pstorm_per_queue_stat);
+ if (IS_PF(p_hwfn->cdev)) {
+ *p_addr = BAR0_MAP_REG_PSDM_RAM +
+ PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_pstorm_per_queue_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
+ *p_len = p_resp->pfdev_info.stats_info.pstats.len;
+ }
}
static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
@@ -1285,32 +1348,15 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
statistics_bin);
memset(&pstats, 0, sizeof(pstats));
- qed_memcpy_from(p_hwfn, p_ptt, &pstats,
- pstats_addr, pstats_len);
-
- p_stats->tx_ucast_bytes +=
- HILO_64_REGPAIR(pstats.sent_ucast_bytes);
- p_stats->tx_mcast_bytes +=
- HILO_64_REGPAIR(pstats.sent_mcast_bytes);
- p_stats->tx_bcast_bytes +=
- HILO_64_REGPAIR(pstats.sent_bcast_bytes);
- p_stats->tx_ucast_pkts +=
- HILO_64_REGPAIR(pstats.sent_ucast_pkts);
- p_stats->tx_mcast_pkts +=
- HILO_64_REGPAIR(pstats.sent_mcast_pkts);
- p_stats->tx_bcast_pkts +=
- HILO_64_REGPAIR(pstats.sent_bcast_pkts);
- p_stats->tx_err_drop_pkts +=
- HILO_64_REGPAIR(pstats.error_drop_pkts);
-}
-
-static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn,
- u32 *p_addr,
- u32 *p_len)
-{
- *p_addr = BAR0_MAP_REG_TSDM_RAM +
- TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
- *p_len = sizeof(struct tstorm_per_port_stat);
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
+
+ p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
}
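
The storm statistics above arrive as 64-bit register pairs that the driver folds into its accumulators with HILO_64_REGPAIR(). A minimal sketch of that folding, assuming the usual {lo, hi} pair layout; the struct and helper names below are illustrative, not the driver's own:

#include <linux/types.h>

/* Illustrative only: fold a {lo, hi} 32-bit register pair into a u64,
 * which is what the HILO_64_REGPAIR() accumulation above relies on.
 */
struct example_regpair {
	u32 lo;
	u32 hi;
};

static inline u64 example_hilo_64(struct example_regpair rp)
{
	return ((u64)rp.hi << 32) | rp.lo;
}
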
static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
@@ -1318,14 +1364,23 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
struct qed_eth_stats *p_stats,
u16 statistics_bin)
{
- u32 tstats_addr = 0, tstats_len = 0;
struct tstorm_per_port_stat tstats;
+ u32 tstats_addr, tstats_len;
+
+ if (IS_PF(p_hwfn->cdev)) {
+ tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+ tstats_len = sizeof(struct tstorm_per_port_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
- __qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len);
+ tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
+ tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
+ }
memset(&tstats, 0, sizeof(tstats));
- qed_memcpy_from(p_hwfn, p_ptt, &tstats,
- tstats_addr, tstats_len);
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
p_stats->mftag_filter_discards +=
HILO_64_REGPAIR(tstats.mftag_filter_discard);
@@ -1335,12 +1390,19 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
- u32 *p_len,
- u16 statistics_bin)
+ u32 *p_len, u16 statistics_bin)
{
- *p_addr = BAR0_MAP_REG_USDM_RAM +
- USTORM_QUEUE_STAT_OFFSET(statistics_bin);
- *p_len = sizeof(struct eth_ustorm_per_queue_stat);
+ if (IS_PF(p_hwfn->cdev)) {
+ *p_addr = BAR0_MAP_REG_USDM_RAM +
+ USTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_ustorm_per_queue_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
+ *p_len = p_resp->pfdev_info.stats_info.ustats.len;
+ }
}
static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
@@ -1355,31 +1417,31 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
statistics_bin);
memset(&ustats, 0, sizeof(ustats));
- qed_memcpy_from(p_hwfn, p_ptt, &ustats,
- ustats_addr, ustats_len);
-
- p_stats->rx_ucast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
- p_stats->rx_mcast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
- p_stats->rx_bcast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
- p_stats->rx_ucast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
- p_stats->rx_mcast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
- p_stats->rx_bcast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+ qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
+
+ p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
- u32 *p_len,
- u16 statistics_bin)
+ u32 *p_len, u16 statistics_bin)
{
- *p_addr = BAR0_MAP_REG_MSDM_RAM +
- MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
- *p_len = sizeof(struct eth_mstorm_per_queue_stat);
+ if (IS_PF(p_hwfn->cdev)) {
+ *p_addr = BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_mstorm_per_queue_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
+ *p_len = p_resp->pfdev_info.stats_info.mstats.len;
+ }
}
static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
@@ -1394,21 +1456,17 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
statistics_bin);
memset(&mstats, 0, sizeof(mstats));
- qed_memcpy_from(p_hwfn, p_ptt, &mstats,
- mstats_addr, mstats_len);
+ qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
- p_stats->no_buff_discards +=
- HILO_64_REGPAIR(mstats.no_buff_discard);
+ p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
p_stats->packet_too_big_discard +=
HILO_64_REGPAIR(mstats.packet_too_big_discard);
- p_stats->ttl0_discard +=
- HILO_64_REGPAIR(mstats.ttl0_discard);
+ p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
p_stats->tpa_coalesced_pkts +=
HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
p_stats->tpa_coalesced_events +=
HILO_64_REGPAIR(mstats.tpa_coalesced_events);
- p_stats->tpa_aborts_num +=
- HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
p_stats->tpa_coalesced_bytes +=
HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
@@ -1428,16 +1486,16 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
sizeof(port_stats));
p_stats->rx_64_byte_packets += port_stats.pmm.r64;
- p_stats->rx_127_byte_packets += port_stats.pmm.r127;
- p_stats->rx_255_byte_packets += port_stats.pmm.r255;
- p_stats->rx_511_byte_packets += port_stats.pmm.r511;
- p_stats->rx_1023_byte_packets += port_stats.pmm.r1023;
- p_stats->rx_1518_byte_packets += port_stats.pmm.r1518;
- p_stats->rx_1522_byte_packets += port_stats.pmm.r1522;
- p_stats->rx_2047_byte_packets += port_stats.pmm.r2047;
- p_stats->rx_4095_byte_packets += port_stats.pmm.r4095;
- p_stats->rx_9216_byte_packets += port_stats.pmm.r9216;
- p_stats->rx_16383_byte_packets += port_stats.pmm.r16383;
+ p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127;
+ p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255;
+ p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511;
+ p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023;
+ p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518;
+ p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522;
+ p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047;
+ p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095;
+ p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216;
+ p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383;
p_stats->rx_crc_errors += port_stats.pmm.rfcs;
p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
p_stats->rx_pause_frames += port_stats.pmm.rxpf;
@@ -1481,44 +1539,49 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_eth_stats *stats,
- u16 statistics_bin)
+ u16 statistics_bin, bool b_get_port_stats)
{
__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
- if (p_hwfn->mcp_info)
+ if (b_get_port_stats && p_hwfn->mcp_info)
__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}
static void _qed_get_vport_stats(struct qed_dev *cdev,
struct qed_eth_stats *stats)
{
- u8 fw_vport = 0;
- int i;
+ u8 fw_vport = 0;
+ int i;
memset(stats, 0, sizeof(*stats));
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- struct qed_ptt *p_ptt;
-
- /* The main vport index is relative first */
- if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
- DP_ERR(p_hwfn, "No vport available!\n");
- continue;
+ struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
+ : NULL;
+
+ if (IS_PF(cdev)) {
+ /* The main vport index is relative first */
+ if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
+ DP_ERR(p_hwfn, "No vport available!\n");
+ goto out;
+ }
}
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt) {
+ if (IS_PF(cdev) && !p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
continue;
}
- __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport);
+ __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
+ IS_PF(cdev) ? true : false);
- qed_ptt_release(p_hwfn, p_ptt);
+out:
+ if (IS_PF(cdev) && p_ptt)
+ qed_ptt_release(p_hwfn, p_ptt);
}
}
@@ -1552,10 +1615,11 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
struct eth_mstorm_per_queue_stat mstats;
struct eth_ustorm_per_queue_stat ustats;
struct eth_pstorm_per_queue_stat pstats;
- struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
+ : NULL;
u32 addr = 0, len = 0;
- if (!p_ptt) {
+ if (IS_PF(cdev) && !p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
continue;
}
@@ -1572,7 +1636,8 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
- qed_ptt_release(p_hwfn, p_ptt);
+ if (IS_PF(cdev))
+ qed_ptt_release(p_hwfn, p_ptt);
}
/* PORT statistics are not necessarily reset, so we need to
@@ -1593,32 +1658,61 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
info->num_tc = 1;
- if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
- for_each_hwfn(cdev, i)
- info->num_queues += FEAT_NUM(&cdev->hwfns[i],
- QED_PF_L2_QUE);
- if (cdev->int_params.fp_msix_cnt)
- info->num_queues = min_t(u8, info->num_queues,
- cdev->int_params.fp_msix_cnt);
+ if (IS_PF(cdev)) {
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ for_each_hwfn(cdev, i)
+ info->num_queues +=
+ FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
+ if (cdev->int_params.fp_msix_cnt)
+ info->num_queues =
+ min_t(u8, info->num_queues,
+ cdev->int_params.fp_msix_cnt);
+ } else {
+ info->num_queues = cdev->num_hwfns;
+ }
+
+ info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
+ ether_addr_copy(info->port_mac,
+ cdev->hwfns[0].hw_info.hw_mac_addr);
} else {
- info->num_queues = cdev->num_hwfns;
- }
+ qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
+ if (cdev->num_hwfns > 1) {
+ u8 queues = 0;
- info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
- ether_addr_copy(info->port_mac,
- cdev->hwfns[0].hw_info.hw_mac_addr);
+ qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
+ info->num_queues += queues;
+ }
+
+ qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
+ &info->num_vlan_filters);
+ qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
+ }
qed_fill_dev_info(cdev, &info->common);
+ if (IS_VF(cdev))
+ memset(info->common.hw_mac, 0, ETH_ALEN);
+
return 0;
}
static void qed_register_eth_ops(struct qed_dev *cdev,
- struct qed_eth_cb_ops *ops,
- void *cookie)
+ struct qed_eth_cb_ops *ops, void *cookie)
+{
+ cdev->protocol_ops.eth = ops;
+ cdev->ops_cookie = cookie;
+
+ /* For VF, we start bulletin reading */
+ if (IS_VF(cdev))
+ qed_vf_start_iov_wq(cdev);
+}
+
+static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
{
- cdev->protocol_ops.eth = ops;
- cdev->ops_cookie = cookie;
+ if (IS_PF(cdev))
+ return true;
+
+ return qed_vf_check_mac(&cdev->hwfns[0], mac);
}
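
The new check_mac callback lets the upper L2 driver ask whether a MAC address may be used: on a PF it always passes, while on a VF it defers to the bulletin-board state controlled by the PF. A hypothetical caller sketch, assuming only the check_mac member wired into the ops table below; the wrapper name is made up for illustration:

/* Hypothetical caller sketch: gate a MAC change on the PF/VF policy. */
static int example_validate_mac(struct qed_dev *cdev,
				const struct qed_eth_ops *ops, u8 *mac)
{
	if (!ops->check_mac(cdev, mac))
		return -EINVAL;

	return 0;
}
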
static int qed_start_vport(struct qed_dev *cdev,
@@ -1633,6 +1727,7 @@ static int qed_start_vport(struct qed_dev *cdev,
start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
QED_TPA_MODE_NONE;
start.remove_inner_vlan = params->remove_inner_vlan;
+ start.only_untagged = true; /* untagged only */
start.drop_ttl0 = params->drop_ttl0;
start.opaque_fid = p_hwfn->hw_info.opaque_fid;
start.concrete_fid = p_hwfn->hw_info.concrete_fid;
@@ -1699,6 +1794,8 @@ static int qed_update_vport(struct qed_dev *cdev,
params->update_vport_active_flg;
sp_params.vport_active_rx_flg = params->vport_active_flg;
sp_params.vport_active_tx_flg = params->vport_active_flg;
+ sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
+ sp_params.tx_switching_flg = params->tx_switching_flg;
sp_params.accept_any_vlan = params->accept_any_vlan;
sp_params.update_accept_any_vlan_flg =
params->update_accept_any_vlan_flg;
@@ -1744,9 +1841,7 @@ static int qed_update_vport(struct qed_dev *cdev,
sp_rss_params.update_rss_capabilities = 1;
sp_rss_params.update_rss_ind_table = 1;
sp_rss_params.update_rss_key = 1;
- sp_rss_params.rss_caps = QED_RSS_IPV4 |
- QED_RSS_IPV6 |
- QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
+ sp_rss_params.rss_caps = params->rss_params.rss_caps;
sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
memcpy(sp_rss_params.rss_ind_table,
params->rss_params.rss_ind_table,
@@ -1899,6 +1994,39 @@ static int qed_stop_txq(struct qed_dev *cdev,
return 0;
}
+static int qed_tunn_configure(struct qed_dev *cdev,
+ struct qed_tunn_params *tunn_params)
+{
+ struct qed_tunn_update_params tunn_info;
+ int i, rc;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ memset(&tunn_info, 0, sizeof(tunn_info));
+ if (tunn_params->update_vxlan_port == 1) {
+ tunn_info.update_vxlan_udp_port = 1;
+ tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
+ }
+
+ if (tunn_params->update_geneve_port == 1) {
+ tunn_info.update_geneve_udp_port = 1;
+ tunn_info.geneve_udp_port = tunn_params->geneve_port;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+
+ rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
+ QED_SPQ_MODE_EBLOCK, NULL);
+
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
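
The new qed_tunn_configure() translates the protocol driver's tunnel request into a PF-update ramrod per hwfn (and is a no-op on a VF). A hedged sketch of how a caller might push a new VXLAN UDP port through the tunn_config operation registered further down; the qed_tunn_params fields are the ones used above, the wrapper itself is illustrative:

/* Illustrative only: request a VXLAN UDP port update through the
 * qed_eth_ops.tunn_config callback.
 */
static int example_set_vxlan_port(struct qed_dev *cdev,
				  const struct qed_eth_ops *ops, u16 port)
{
	struct qed_tunn_params params;

	memset(&params, 0, sizeof(params));
	params.update_vxlan_port = 1;
	params.vxlan_port = port;

	return ops->tunn_config(cdev, &params);
}
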
+
static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
enum qed_filter_rx_mode_type type)
{
@@ -2026,10 +2154,18 @@ static int qed_fp_cqe_completion(struct qed_dev *dev,
cqe);
}
+#ifdef CONFIG_QED_SRIOV
+extern const struct qed_iov_hv_ops qed_iov_ops_pass;
+#endif
+
static const struct qed_eth_ops qed_eth_ops_pass = {
.common = &qed_common_ops_pass,
+#ifdef CONFIG_QED_SRIOV
+ .iov = &qed_iov_ops_pass,
+#endif
.fill_dev_info = &qed_fill_eth_dev_info,
.register_ops = &qed_register_eth_ops,
+ .check_mac = &qed_check_mac,
.vport_start = &qed_start_vport,
.vport_stop = &qed_stop_vport,
.vport_update = &qed_update_vport,
@@ -2041,16 +2177,11 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
.fastpath_stop = &qed_fastpath_stop,
.eth_cqe_completion = &qed_fp_cqe_completion,
.get_vport_stats = &qed_get_vport_stats,
+ .tunn_config = &qed_tunn_configure,
};
-const struct qed_eth_ops *qed_get_eth_ops(u32 version)
+const struct qed_eth_ops *qed_get_eth_ops(void)
{
- if (version != QED_ETH_INTERFACE_VERSION) {
- pr_notice("Cannot supply ethtool operations [%08x != %08x]\n",
- version, QED_ETH_INTERFACE_VERSION);
- return NULL;
- }
-
return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);
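
With the interface-version handshake gone, qed_get_eth_ops() takes no arguments and always returns the ops table; compatibility is left to the ordinary module dependency between qed and its protocol drivers. The entry point for a protocol driver reduces to the fragment below (the wrapper name is illustrative):

/* Illustrative only: acquiring the L2 ops after the version check was
 * dropped. The old form was qed_get_eth_ops(QED_ETH_INTERFACE_VERSION)
 * and could return NULL; the new form cannot fail.
 */
static const struct qed_eth_ops *example_acquire_eth_ops(void)
{
	return qed_get_eth_ops();
}
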
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
new file mode 100644
index 0000000..0021145
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -0,0 +1,239 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef _QED_L2_H
+#define _QED_L2_H
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_eth_if.h>
+#include "qed.h"
+#include "qed_hw.h"
+#include "qed_sp.h"
+
+struct qed_sge_tpa_params {
+ u8 max_buffers_per_cqe;
+
+ u8 update_tpa_en_flg;
+ u8 tpa_ipv4_en_flg;
+ u8 tpa_ipv6_en_flg;
+ u8 tpa_ipv4_tunn_en_flg;
+ u8 tpa_ipv6_tunn_en_flg;
+
+ u8 update_tpa_param_flg;
+ u8 tpa_pkt_split_flg;
+ u8 tpa_hdr_data_split_flg;
+ u8 tpa_gro_consistent_flg;
+ u8 tpa_max_aggs_num;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
+};
+
+enum qed_filter_opcode {
+ QED_FILTER_ADD,
+ QED_FILTER_REMOVE,
+ QED_FILTER_MOVE,
+ QED_FILTER_REPLACE, /* Delete all MACs and add a new one instead */
+ QED_FILTER_FLUSH, /* Removes all filters */
+};
+
+enum qed_filter_ucast_type {
+ QED_FILTER_MAC,
+ QED_FILTER_VLAN,
+ QED_FILTER_MAC_VLAN,
+ QED_FILTER_INNER_MAC,
+ QED_FILTER_INNER_VLAN,
+ QED_FILTER_INNER_PAIR,
+ QED_FILTER_INNER_MAC_VNI_PAIR,
+ QED_FILTER_MAC_VNI_PAIR,
+ QED_FILTER_VNI,
+};
+
+struct qed_filter_ucast {
+ enum qed_filter_opcode opcode;
+ enum qed_filter_ucast_type type;
+ u8 is_rx_filter;
+ u8 is_tx_filter;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ unsigned char mac[ETH_ALEN];
+ u8 assert_on_error;
+ u16 vlan;
+ u32 vni;
+};
+
+struct qed_filter_mcast {
+ /* MOVE is not supported for multicast */
+ enum qed_filter_opcode opcode;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ u8 num_mc_addrs;
+#define QED_MAX_MC_ADDRS 64
+ unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
+};
+
+int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ bool eq_completion_only, bool cqe_completion);
+
+int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id);
+
+enum qed_tpa_mode {
+ QED_TPA_MODE_NONE,
+ QED_TPA_MODE_UNUSED,
+ QED_TPA_MODE_GRO,
+ QED_TPA_MODE_MAX
+};
+
+struct qed_sp_vport_start_params {
+ enum qed_tpa_mode tpa_mode;
+ bool remove_inner_vlan;
+ bool tx_switching;
+ bool only_untagged;
+ bool drop_ttl0;
+ u8 max_buffers_per_cqe;
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u8 vport_id;
+ u16 mtu;
+};
+
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params);
+
+struct qed_rss_params {
+ u8 update_rss_config;
+ u8 rss_enable;
+ u8 rss_eng_id;
+ u8 update_rss_capabilities;
+ u8 update_rss_ind_table;
+ u8 update_rss_key;
+ u8 rss_caps;
+ u8 rss_table_size_log;
+ u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
+ u32 rss_key[QED_RSS_KEY_SIZE];
+};
+
+struct qed_filter_accept_flags {
+ u8 update_rx_mode_config;
+ u8 update_tx_mode_config;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+#define QED_ACCEPT_NONE 0x01
+#define QED_ACCEPT_UCAST_MATCHED 0x02
+#define QED_ACCEPT_UCAST_UNMATCHED 0x04
+#define QED_ACCEPT_MCAST_MATCHED 0x08
+#define QED_ACCEPT_MCAST_UNMATCHED 0x10
+#define QED_ACCEPT_BCAST 0x20
+};
+
+struct qed_sp_vport_update_params {
+ u16 opaque_fid;
+ u8 vport_id;
+ u8 update_vport_active_rx_flg;
+ u8 vport_active_rx_flg;
+ u8 update_vport_active_tx_flg;
+ u8 vport_active_tx_flg;
+ u8 update_inner_vlan_removal_flg;
+ u8 inner_vlan_removal_flg;
+ u8 silent_vlan_removal_flg;
+ u8 update_default_vlan_enable_flg;
+ u8 default_vlan_enable_flg;
+ u8 update_default_vlan_flg;
+ u16 default_vlan;
+ u8 update_tx_switching_flg;
+ u8 tx_switching_flg;
+ u8 update_approx_mcast_flg;
+ u8 update_anti_spoofing_en_flg;
+ u8 anti_spoofing_en;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+ unsigned long bins[8];
+ struct qed_rss_params *rss_params;
+ struct qed_filter_accept_flags accept_flags;
+ struct qed_sge_tpa_params *sge_tpa_params;
+};
+
+int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief qed_sp_vport_stop -
+ *
+ * This ramrod closes a VPort after all its RX and TX queues are terminated.
+ * An Assert is generated if any queues are left open.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param vport_id VPort ID
+ *
+ * @return int
+ */
+int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);
+
+int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief qed_sp_eth_rx_queues_update -
+ *
+ * This ramrod updates an RX queue. It is used for setting the active state
+ * of the queue and updating the TPA and SGE parameters.
+ *
+ * @note At the moment - only used by non-Linux VFs.
+ *
+ * @param p_hwfn
+ * @param rx_queue_id RX Queue ID
+ * @param num_rxqs Allows updating multiple rx
+ * queues, from rx_queue_id to
+ * (rx_queue_id + num_rxqs)
+ * @param complete_cqe_flg Post completion to the CQE Ring if set
+ * @param complete_event_flg Post completion to the Event Ring if set
+ *
+ * @return int
+ */
+
+int
+qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ u8 num_rxqs,
+ u8 complete_cqe_flg,
+ u8 complete_event_flg,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
+
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params);
+
+int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *params,
+ u8 stats_id,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+
+int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *p_params,
+ u8 stats_id,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ union qed_qm_pq_params *p_pq_params);
+
+u8 qed_mcast_bin_from_mac(u8 *mac);
+
+#endif /* _QED_L2_H */
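
Most of qed_sp_vport_update_params follows a pairing convention: every mutable attribute travels with an update_*_flg gate, so a single ramrod can change only the selected fields and leave the rest untouched. A minimal sketch under that assumption, reusing only names declared in this header and in the hunks above (the helper itself is illustrative):

/* Illustrative only: enable TX switching on an existing vport; fields
 * whose update_* flag stays zero are not modified by the ramrod.
 */
static int example_enable_tx_switching(struct qed_hwfn *p_hwfn, u8 vport_id)
{
	struct qed_sp_vport_update_params params;

	memset(&params, 0, sizeof(params));
	params.opaque_fid = p_hwfn->hw_info.opaque_fid;
	params.vport_id = vport_id;
	params.update_tx_switching_flg = 1;
	params.tx_switching_flg = 1;

	return qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
}
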
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 26d40db..8b22f87 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -24,10 +24,12 @@
#include <linux/qed/qed_if.h>
#include "qed.h"
+#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_mcp.h"
#include "qed_hw.h"
+#include "qed_selftest.h"
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -124,7 +126,7 @@ static int qed_init_pci(struct qed_dev *cdev,
goto err1;
}
- if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
DP_NOTICE(cdev, "No memory region found in bar #2\n");
rc = -EIO;
goto err1;
@@ -156,7 +158,7 @@ static int qed_init_pci(struct qed_dev *cdev,
}
cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (cdev->pci_params.pm_cap == 0)
+ if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
DP_NOTICE(cdev, "Cannot find power management capability\n");
rc = qed_set_coherency_mask(cdev);
@@ -174,12 +176,14 @@ static int qed_init_pci(struct qed_dev *cdev,
goto err2;
}
- cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
- cdev->db_size = pci_resource_len(cdev->pdev, 2);
- cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
- if (!cdev->doorbells) {
- DP_NOTICE(cdev, "Cannot map doorbell space\n");
- return -ENOMEM;
+ if (IS_PF(cdev)) {
+ cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
+ cdev->db_size = pci_resource_len(cdev->pdev, 2);
+ cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
+ if (!cdev->doorbells) {
+ DP_NOTICE(cdev, "Cannot map doorbell space\n");
+ return -ENOMEM;
+ }
}
return 0;
@@ -206,20 +210,33 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
- dev_info->fw_major = FW_MAJOR_VERSION;
- dev_info->fw_minor = FW_MINOR_VERSION;
- dev_info->fw_rev = FW_REVISION_VERSION;
- dev_info->fw_eng = FW_ENGINEERING_VERSION;
- dev_info->mf_mode = cdev->mf_mode;
+ if (IS_PF(cdev)) {
+ dev_info->fw_major = FW_MAJOR_VERSION;
+ dev_info->fw_minor = FW_MINOR_VERSION;
+ dev_info->fw_rev = FW_REVISION_VERSION;
+ dev_info->fw_eng = FW_ENGINEERING_VERSION;
+ dev_info->mf_mode = cdev->mf_mode;
+ dev_info->tx_switching = true;
+ } else {
+ qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
+ &dev_info->fw_minor, &dev_info->fw_rev,
+ &dev_info->fw_eng);
+ }
- qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev);
+ if (IS_PF(cdev)) {
+ ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (ptt) {
+ qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
+ &dev_info->mfw_rev, NULL);
- ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
- if (ptt) {
- qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
- &dev_info->flash_size);
+ qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
+ &dev_info->flash_size);
- qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+ qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+ }
+ } else {
+ qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
+ &dev_info->mfw_rev, NULL);
}
return 0;
@@ -256,9 +273,7 @@ static int qed_set_power_state(struct qed_dev *cdev,
/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
- enum qed_protocol protocol,
- u32 dp_module,
- u8 dp_level)
+ struct qed_probe_params *params)
{
struct qed_dev *cdev;
int rc;
@@ -267,9 +282,12 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
if (!cdev)
goto err0;
- cdev->protocol = protocol;
+ cdev->protocol = params->protocol;
+
+ if (params->is_vf)
+ cdev->b_is_vf = true;
- qed_init_dp(cdev, dp_module, dp_level);
+ qed_init_dp(cdev, params->dp_module, params->dp_level);
rc = qed_init_pci(cdev, pdev);
if (rc) {
@@ -663,6 +681,35 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
return 0;
}
+static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
+{
+ int rc;
+
+ memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+ cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;
+
+ qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
+ &cdev->int_params.in.num_vectors);
+ if (cdev->num_hwfns > 1) {
+ u8 vectors = 0;
+
+ qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
+ cdev->int_params.in.num_vectors += vectors;
+ }
+
+ /* We want a minimum of one fastpath vector per vf hwfn */
+ cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;
+
+ rc = qed_set_int_mode(cdev, true);
+ if (rc)
+ return rc;
+
+ cdev->int_params.fp_msix_base = 0;
+ cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;
+
+ return 0;
+}
+
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
@@ -744,39 +791,62 @@ static void qed_update_pf_params(struct qed_dev *cdev,
static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_slowpath_params *params)
{
+ struct qed_tunn_start_params tunn_info;
struct qed_mcp_drv_version drv_version;
const u8 *data = NULL;
struct qed_hwfn *hwfn;
- int rc;
+ int rc = -EINVAL;
- rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
- &cdev->pdev->dev);
- if (rc) {
- DP_NOTICE(cdev,
- "Failed to find fw file - /lib/firmware/%s\n",
- QED_FW_FILE_NAME);
+ if (qed_iov_wq_start(cdev))
goto err;
+
+ if (IS_PF(cdev)) {
+ rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
+ &cdev->pdev->dev);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to find fw file - /lib/firmware/%s\n",
+ QED_FW_FILE_NAME);
+ goto err;
+ }
}
rc = qed_nic_setup(cdev);
if (rc)
goto err;
- rc = qed_slowpath_setup_int(cdev, params->int_mode);
+ if (IS_PF(cdev))
+ rc = qed_slowpath_setup_int(cdev, params->int_mode);
+ else
+ rc = qed_slowpath_vf_setup_int(cdev);
if (rc)
goto err1;
- /* Allocate stream for unzipping */
- rc = qed_alloc_stream_mem(cdev);
- if (rc) {
- DP_NOTICE(cdev, "Failed to allocate stream memory\n");
- goto err2;
+ if (IS_PF(cdev)) {
+ /* Allocate stream for unzipping */
+ rc = qed_alloc_stream_mem(cdev);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to allocate stream memory\n");
+ goto err2;
+ }
+
+ data = cdev->firmware->data;
}
- /* Start the slowpath */
- data = cdev->firmware->data;
+ memset(&tunn_info, 0, sizeof(tunn_info));
+ tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
+ 1 << QED_MODE_L2GRE_TUNN |
+ 1 << QED_MODE_IPGRE_TUNN |
+ 1 << QED_MODE_L2GENEVE_TUNN |
+ 1 << QED_MODE_IPGENEVE_TUNN;
+
+ tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
- rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
+ /* Start the slowpath */
+ rc = qed_hw_init(cdev, &tunn_info, true,
+ cdev->int_params.out.int_mode,
true, data);
if (rc)
goto err2;
@@ -784,18 +854,20 @@ static int qed_slowpath_start(struct qed_dev *cdev,
DP_INFO(cdev,
"HW initialization and function start completed successfully\n");
- hwfn = QED_LEADING_HWFN(cdev);
- drv_version.version = (params->drv_major << 24) |
- (params->drv_minor << 16) |
- (params->drv_rev << 8) |
- (params->drv_eng);
- strlcpy(drv_version.name, params->name,
- MCP_DRV_VER_STR_SIZE - 4);
- rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
- &drv_version);
- if (rc) {
- DP_NOTICE(cdev, "Failed sending drv version command\n");
- return rc;
+ if (IS_PF(cdev)) {
+ hwfn = QED_LEADING_HWFN(cdev);
+ drv_version.version = (params->drv_major << 24) |
+ (params->drv_minor << 16) |
+ (params->drv_rev << 8) |
+ (params->drv_eng);
+ strlcpy(drv_version.name, params->name,
+ MCP_DRV_VER_STR_SIZE - 4);
+ rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
+ &drv_version);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed sending drv version command\n");
+ return rc;
+ }
}
qed_reset_vport_stats(cdev);
@@ -804,13 +876,17 @@ static int qed_slowpath_start(struct qed_dev *cdev,
err2:
qed_hw_timers_stop_all(cdev);
- qed_slowpath_irq_free(cdev);
+ if (IS_PF(cdev))
+ qed_slowpath_irq_free(cdev);
qed_free_stream_mem(cdev);
qed_disable_msix(cdev);
err1:
qed_resc_free(cdev);
err:
- release_firmware(cdev->firmware);
+ if (IS_PF(cdev))
+ release_firmware(cdev->firmware);
+
+ qed_iov_wq_stop(cdev, false);
return rc;
}
@@ -820,15 +896,21 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
if (!cdev)
return -ENODEV;
- qed_free_stream_mem(cdev);
+ if (IS_PF(cdev)) {
+ qed_free_stream_mem(cdev);
+ qed_sriov_disable(cdev, true);
- qed_nic_stop(cdev);
- qed_slowpath_irq_free(cdev);
+ qed_nic_stop(cdev);
+ qed_slowpath_irq_free(cdev);
+ }
qed_disable_msix(cdev);
qed_nic_reset(cdev);
- release_firmware(cdev->firmware);
+ qed_iov_wq_stop(cdev, true);
+
+ if (IS_PF(cdev))
+ release_firmware(cdev->firmware);
return 0;
}
@@ -902,6 +984,11 @@ static u32 qed_sb_release(struct qed_dev *cdev,
return rc;
}
+static bool qed_can_link_change(struct qed_dev *cdev)
+{
+ return true;
+}
+
static int qed_set_link(struct qed_dev *cdev,
struct qed_link_params *params)
{
@@ -913,6 +1000,9 @@ static int qed_set_link(struct qed_dev *cdev,
if (!cdev)
return -ENODEV;
+ if (IS_VF(cdev))
+ return 0;
+
/* The link should be set only once per PF */
hwfn = &cdev->hwfns[0];
@@ -944,6 +1034,39 @@ static int qed_set_link(struct qed_dev *cdev,
}
if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
link_params->speed.forced_speed = params->forced_speed;
+ if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
+ if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+ link_params->pause.autoneg = true;
+ else
+ link_params->pause.autoneg = false;
+ if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
+ link_params->pause.forced_rx = true;
+ else
+ link_params->pause.forced_rx = false;
+ if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
+ link_params->pause.forced_tx = true;
+ else
+ link_params->pause.forced_tx = false;
+ }
+ if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
+ switch (params->loopback_mode) {
+ case QED_LINK_LOOPBACK_INT_PHY:
+ link_params->loopback_mode = PMM_LOOPBACK_INT_PHY;
+ break;
+ case QED_LINK_LOOPBACK_EXT_PHY:
+ link_params->loopback_mode = PMM_LOOPBACK_EXT_PHY;
+ break;
+ case QED_LINK_LOOPBACK_EXT:
+ link_params->loopback_mode = PMM_LOOPBACK_EXT;
+ break;
+ case QED_LINK_LOOPBACK_MAC:
+ link_params->loopback_mode = PMM_LOOPBACK_MAC;
+ break;
+ default:
+ link_params->loopback_mode = PMM_LOOPBACK_NONE;
+ break;
+ }
+ }
rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
@@ -991,10 +1114,16 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
memset(if_link, 0, sizeof(*if_link));
/* Prepare source inputs */
- memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
- memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
- memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
- sizeof(link_caps));
+ if (IS_PF(hwfn->cdev)) {
+ memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
+ memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
+ memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
+ sizeof(link_caps));
+ } else {
+ qed_vf_get_link_params(hwfn, &params);
+ qed_vf_get_link_state(hwfn, &link);
+ qed_vf_get_link_caps(hwfn, &link_caps);
+ }
/* Set the link parameters to pass to protocol driver */
if (link.link_up)
@@ -1096,7 +1225,12 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
static void qed_get_current_link(struct qed_dev *cdev,
struct qed_link_output *if_link)
{
+ int i;
+
qed_fill_link(&cdev->hwfns[0], if_link);
+
+ for_each_hwfn(cdev, i)
+ qed_inform_vf_link_state(&cdev->hwfns[i]);
}
void qed_link_update(struct qed_hwfn *hwfn)
@@ -1106,6 +1240,7 @@ void qed_link_update(struct qed_hwfn *hwfn)
struct qed_link_output if_link;
qed_fill_link(hwfn, &if_link);
+ qed_inform_vf_link_state(hwfn);
if (IS_LEAD_HWFN(hwfn) && cookie)
op->link_update(cookie, &if_link);
@@ -1117,6 +1252,9 @@ static int qed_drain(struct qed_dev *cdev)
struct qed_ptt *ptt;
int i, rc;
+ if (IS_VF(cdev))
+ return 0;
+
for_each_hwfn(cdev, i) {
hwfn = &cdev->hwfns[i];
ptt = qed_ptt_acquire(hwfn);
@@ -1150,7 +1288,15 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
return status;
}
+struct qed_selftest_ops qed_selftest_ops_pass = {
+ .selftest_memory = &qed_selftest_memory,
+ .selftest_interrupt = &qed_selftest_interrupt,
+ .selftest_register = &qed_selftest_register,
+ .selftest_clock = &qed_selftest_clock,
+};
+
const struct qed_common_ops qed_common_ops_pass = {
+ .selftest = &qed_selftest_ops_pass,
.probe = &qed_probe,
.remove = &qed_remove,
.set_power_state = &qed_set_power_state,
@@ -1164,6 +1310,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.sb_release = &qed_sb_release,
.simd_handler_config = &qed_simd_handler_config,
.simd_handler_clean = &qed_simd_handler_clean,
+ .can_link_change = &qed_can_link_change,
.set_link = &qed_set_link,
.get_link = &qed_get_current_link,
.drain = &qed_drain,
@@ -1172,14 +1319,3 @@ const struct qed_common_ops qed_common_ops_pass = {
.chain_free = &qed_chain_free,
.set_led = &qed_set_led,
};
-
-u32 qed_get_protocol_version(enum qed_protocol protocol)
-{
- switch (protocol) {
- case QED_PROTOCOL_ETH:
- return QED_ETH_INTERFACE_VERSION;
- default:
- return 0;
- }
-}
-EXPORT_SYMBOL(qed_get_protocol_version);
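
The probe path in this file now takes a single parameter block instead of discrete protocol/debug arguments, which is also how the VF flag reaches the core. A hypothetical upper-driver call under that new shape; the field names follow the qed_probe() hunk near the top of this file, the wrapper and its arguments are assumptions:

/* Hypothetical caller sketch: filling the new probe parameter block. */
static struct qed_dev *example_probe(struct pci_dev *pdev,
				     const struct qed_common_ops *ops,
				     bool is_vf)
{
	struct qed_probe_params params;

	memset(&params, 0, sizeof(params));
	params.protocol = QED_PROTOCOL_ETH;
	params.dp_module = 0;	/* debug masks left at their defaults */
	params.dp_level = 0;
	params.is_vf = is_vf;

	return ops->probe(pdev, &params);
}
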
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index b89c9a8..1182361 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -15,10 +15,13 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
+#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
+#include "qed_sriov.h"
+
#define CHIP_MCP_RESP_ITER_US 10
#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
@@ -440,6 +443,75 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PATH);
+ u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 path_addr = SECTION_ADDR(mfw_path_offsize,
+ QED_PATH_ID(p_hwfn));
+ u32 disabled_vfs[VF_MAX_STATIC / 32];
+ int i;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
+ mfw_path_offsize, path_addr);
+
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
+ disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
+ path_addr +
+ offsetof(struct public_path,
+ mcp_vf_disabled) +
+ sizeof(u32) * i);
+ DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
+ "FLR-ed VFs [%08x,...,%08x] - %08x\n",
+ i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
+ }
+
+ if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
+ qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
+}
+
+int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *vfs_to_ack)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_func_offsize,
+ MCP_PF_ID(p_hwfn));
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ int rc;
+ int i;
+
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
+ "Acking VFs [%08x,...,%08x] - %08x\n",
+ i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
+ memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
+ mb_params.p_data_src = &union_data;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
+ return -EBUSY;
+ }
+
+ /* Clear the ACK bits */
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ qed_wr(p_hwfn, p_ptt,
+ func_addr +
+ offsetof(struct public_func, drv_ack_vf_disabled) +
+ i * sizeof(u32), 0);
+
+ return rc;
+}
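
The FLR flow introduced here is: the MFW flags FLR-ed VFs in a per-path bitmap, qed_mcp_handle_vf_flr() reads it and schedules the IOV workqueue, and once cleanup completes the PF acknowledges through qed_mcp_ack_vf_flr(). A sketch of walking a bitmap of that shape, for illustration only (the helper below is not part of the driver):

/* Illustrative only: report every VF whose FLR bit is set in a
 * disabled-VF bitmap of the shape read in qed_mcp_handle_vf_flr().
 */
static void example_report_flred_vfs(struct qed_hwfn *p_hwfn,
				     u32 *disabled_vfs)
{
	int i, bit;

	for (i = 0; i < VF_MAX_STATIC / 32; i++) {
		if (!disabled_vfs[i])
			continue;

		for (bit = 0; bit < 32; bit++)
			if (disabled_vfs[i] & BIT(bit))
				DP_VERBOSE(p_hwfn, QED_MSG_IOV,
					   "VF %d was FLR-ed\n",
					   i * 32 + bit);
	}
}
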
+
static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -472,6 +544,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
bool b_reset)
{
struct qed_mcp_link_state *p_link;
+ u8 max_bw, min_bw;
u32 status = 0;
p_link = &p_hwfn->mcp_info->link_output;
@@ -527,17 +600,20 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
p_link->speed = 0;
}
- /* Correct speed according to bandwidth allocation */
- if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
- p_link->speed = p_link->speed *
- p_hwfn->mcp_info->func_info.bandwidth_max /
- 100;
- qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
- p_link->speed);
- DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
- "Configured MAX bandwidth to be %08x Mb/sec\n",
- p_link->speed);
- }
+ if (p_link->link_up && p_link->speed)
+ p_link->line_speed = p_link->speed;
+ else
+ p_link->line_speed = 0;
+
+ max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
+ min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
+
+ /* Max bandwidth configuration */
+ __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
+
+ /* Min bandwidth configuration */
+ __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
+ qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);
p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
p_link->an_complete = !!(status &
@@ -648,6 +724,77 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
+ struct public_func *p_shmem_info)
+{
+ struct qed_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+ p_info->bandwidth_min = (p_shmem_info->config &
+ FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT;
+ if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+ p_info->bandwidth_min);
+ p_info->bandwidth_min = 1;
+ }
+
+ p_info->bandwidth_max = (p_shmem_info->config &
+ FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+ p_info->bandwidth_max);
+ p_info->bandwidth_max = 100;
+ }
+}
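
Both bandwidth fields read from shmem are sanitized into the 1..100 percent range before use. The same result could be had with the kernel clamp_t() helper, as in the sketch below; the driver keeps the explicit checks so the out-of-range value can be logged first (the helper name is illustrative):

/* Illustrative alternative: 1..100 sanitization via clamp_t(). */
static u8 example_sanitize_bw(u32 raw_bw)
{
	return clamp_t(u32, raw_bw, 1, 100);
}
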
+
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct public_func *p_data,
+ int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ u32 i, size;
+
+ memset(p_data, 0, sizeof(*p_data));
+
+ size = min_t(u32, sizeof(*p_data),
+ QED_SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+ return size;
+}
+
+static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_function_info *p_info;
+ struct public_func shmem_info;
+ u32 resp = 0, param = 0;
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+
+ qed_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+ qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
+ qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
+
+ /* Acknowledge the MFW */
+ qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
+ &param);
+}
+
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -676,9 +823,27 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_LINK_CHANGE:
qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
break;
+ case MFW_DRV_MSG_VF_DISABLED:
+ qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_LLDP_DATA_UPDATED:
+ qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+ QED_DCBX_REMOTE_LLDP_MIB);
+ break;
+ case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
+ qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+ QED_DCBX_REMOTE_MIB);
+ break;
+ case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
+ qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+ QED_DCBX_OPERATIONAL_MIB);
+ break;
case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_BW_UPDATE:
+ qed_mcp_update_bw(p_hwfn, p_ptt);
+ break;
default:
DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
rc = -EINVAL;
@@ -709,26 +874,42 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
- u32 *p_mfw_ver)
+int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
- struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
- struct qed_ptt *p_ptt;
u32 global_offsize;
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt)
- return -EBUSY;
+ if (IS_VF(p_hwfn->cdev)) {
+ if (p_hwfn->vf_iov_info) {
+ struct pfvf_acquire_resp_tlv *p_resp;
+
+ p_resp = &p_hwfn->vf_iov_info->acquire_resp;
+ *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
+ return 0;
+ } else {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF requested MFW version prior to ACQUIRE\n");
+ return -EINVAL;
+ }
+ }
global_offsize = qed_rd(p_hwfn, p_ptt,
- SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
- public_base,
+ SECTION_OFFSIZE_ADDR(p_hwfn->
+ mcp_info->public_base,
PUBLIC_GLOBAL));
- *p_mfw_ver = qed_rd(p_hwfn, p_ptt,
- SECTION_ADDR(global_offsize, 0) +
- offsetof(struct public_global, mfw_ver));
-
- qed_ptt_release(p_hwfn, p_ptt);
+ *p_mfw_ver =
+ qed_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize,
+ 0) + offsetof(struct public_global, mfw_ver));
+
+ if (p_running_bundle_id != NULL) {
+ *p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize, 0) +
+ offsetof(struct public_global,
+ running_bundle_id));
+ }
return 0;
}
@@ -739,6 +920,9 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
struct qed_ptt *p_ptt;
+ if (IS_VF(cdev))
+ return -EINVAL;
+
if (!qed_mcp_is_init(p_hwfn)) {
DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
return -EBUSY;
@@ -758,28 +942,6 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
return 0;
}
-static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct public_func *p_data,
- int pfid)
-{
- u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
- PUBLIC_FUNC);
- u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
- u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
- u32 i, size;
-
- memset(p_data, 0, sizeof(*p_data));
-
- size = min_t(u32, sizeof(*p_data),
- QED_SECTION_SIZE(mfw_path_offsize));
- for (i = 0; i < size / sizeof(u32); i++)
- ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
- func_addr + (i << 2));
-
- return size;
-}
-
static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
struct public_func *p_info,
@@ -818,26 +980,7 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
-
- info->bandwidth_min = (shmem_info.config &
- FUNC_MF_CFG_MIN_BW_MASK) >>
- FUNC_MF_CFG_MIN_BW_SHIFT;
- if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
- DP_INFO(p_hwfn,
- "bandwidth minimum out of bounds [%02x]. Set to 1\n",
- info->bandwidth_min);
- info->bandwidth_min = 1;
- }
-
- info->bandwidth_max = (shmem_info.config &
- FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT;
- if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
- DP_INFO(p_hwfn,
- "bandwidth maximum out of bounds [%02x]. Set to 100\n",
- info->bandwidth_max);
- info->bandwidth_max = 100;
- }
+ qed_read_pf_bandwidth(p_hwfn, &shmem_info);
if (shmem_info.mac_upper || shmem_info.mac_lower) {
info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
@@ -914,6 +1057,9 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
{
u32 flash_size;
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
@@ -924,6 +1070,37 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vf_id, u8 num)
+{
+ u32 resp = 0, param = 0, rc_param = 0;
+ int rc;
+
+ /* Only the leader hwfn can configure MSI-X, and it must account for CMT */
+ if (!IS_LEAD_HWFN(p_hwfn))
+ return 0;
+ num *= p_hwfn->cdev->num_hwfns;
+
+ param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
+ DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
+ param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
+ DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
+ &resp, &rc_param);
+
+ if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
+ DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
+ rc = -EINVAL;
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
+ num, vf_id);
+ }
+
+ return rc;
+}
+
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -938,9 +1115,10 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
p_drv_version = &union_data.drv_version;
p_drv_version->version = p_ver->version;
+
for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) {
val = cpu_to_be32(p_ver->name[i]);
- *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+ *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
}
memset(&mb_params, 0, sizeof(mb_params));
@@ -979,3 +1157,45 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return rc;
}
+
+int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 drv_mb_param = 0, rsp, param;
+ int rc = 0;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, &param);
+
+ if (rc)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (param != DRV_MB_PARAM_BIST_RC_PASSED))
+ rc = -EAGAIN;
+
+ return rc;
+}
+
+int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 drv_mb_param, rsp, param;
+ int rc = 0;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, &param);
+
+ if (rc)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (param != DRV_MB_PARAM_BIST_RC_PASSED))
+ rc = -EAGAIN;
+
+ return rc;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 50917a2..6dd59eb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -40,7 +40,15 @@ struct qed_mcp_link_capabilities {
struct qed_mcp_link_state {
bool link_up;
- u32 speed; /* In Mb/s */
+ u32 min_pf_rate;
+
+ /* Actual link speed in Mb/s */
+ u32 line_speed;
+
+ /* PF max speed in Mb/s, deduced from line_speed
+ * according to PF max bandwidth configuration.
+ */
+ u32 speed;
bool full_duplex;
bool an;
@@ -141,13 +149,16 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
/**
* @brief Get the management firmware version value
*
- * @param cdev - qed dev pointer
- * @param mfw_ver - mfw version value
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mfw_ver - mfw version value
+ * @param p_running_bundle_id - image ID in NVRAM; optional.
*
- * @return int - 0 - operation was successul.
+ * @return int - 0 - operation was successful.
*/
-int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
- u32 *mfw_ver);
+int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_mfw_ver, u32 *p_running_bundle_id);
/**
* @brief Get media type value of the port.
@@ -237,6 +248,28 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_led_mode mode);
+/**
+ * @brief BIST register test
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief BIST clock test
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
/* Using hwfn number (and not pf_num) is required since in CMT mode,
* same pf_num may be used by two different hwfn
* TODO - this shouldn't really be in .h file, but until all fields
@@ -360,6 +393,18 @@ void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
+ * @brief Ack to mfw that driver finished FLR process for VFs
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
+ *
+ * @return int - 0 upon success.
+ */
+int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *vfs_to_ack);
+
+/**
* @brief - calls during init to read shmem of all function-related info.
*
* @param p_hwfn
@@ -389,4 +434,27 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
*/
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
+/**
+ * @brief request MFW to configure MSI-X for a VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf_id - VF ID, absolute inside the engine
+ * @param num - number of MSI-X entries to request
+ *
+ * @return int
+ */
+int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vf_id, u8 num);
+
+int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
+int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
+int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 max_bw);
+int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 min_bw);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index c15b162..3a6c506 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -39,6 +39,10 @@
0x2aae04UL
#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
0x2aa16cUL
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR \
+ 0x2aa118UL
+#define PSWHST_REG_ZONE_PERMISSION_TABLE \
+ 0x2a0800UL
#define BAR0_MAP_REG_MSDM_RAM \
0x1d00000UL
#define BAR0_MAP_REG_USDM_RAM \
@@ -77,6 +81,8 @@
0x2f2eb0UL
#define DORQ_REG_PF_DB_ENABLE \
0x100508UL
+#define DORQ_REG_VF_USAGE_CNT \
+ 0x1009c4UL
#define QM_REG_PF_EN \
0x2f2ea4UL
#define TCFC_REG_STRONG_ENABLE_PF \
@@ -111,6 +117,8 @@
0x009778UL
#define MISCS_REG_CHIP_METAL \
0x009774UL
+#define MISCS_REG_FUNCTION_HIDE \
+ 0x0096f0UL
#define BRB_REG_HEADER_SIZE \
0x340804UL
#define BTB_REG_HEADER_SIZE \
@@ -119,6 +127,8 @@
0x1c0708UL
#define CCFC_REG_ACTIVITY_COUNTER \
0x2e8800UL
+#define CCFC_REG_STRONG_ENABLE_VF \
+ 0x2e070cUL
#define CDU_REG_CID_ADDR_PARAMS \
0x580900UL
#define DBG_REG_CLIENT_ENABLE \
@@ -161,6 +171,10 @@
0x040200UL
#define PBF_REG_INIT \
0xd80000UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 \
+ 0xd806c8UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 \
+ 0xd806ccUL
#define PTU_REG_ATC_INIT_ARRAY \
0x560000UL
#define PCM_REG_INIT \
@@ -385,6 +399,8 @@
0x1d0000UL
#define IGU_REG_PF_CONFIGURATION \
0x180800UL
+#define IGU_REG_VF_CONFIGURATION \
+ 0x180804UL
#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
0x00849cUL
#define MISC_REG_AEU_AFTER_INVERT_1_IGU \
@@ -411,6 +427,10 @@
0x1 << 0)
#define IGU_REG_MAPPING_MEMORY \
0x184000UL
+#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \
+ 0x180408UL
+#define IGU_REG_WRITE_DONE_PENDING \
+ 0x180900UL
#define MISCS_REG_GENERIC_POR_0 \
0x0096d4UL
#define MCP_REG_NVM_CFG4 \
@@ -427,4 +447,37 @@
0x2aae60UL
#define PGLUE_B_REG_PF_BAR1_SIZE \
0x2aae64UL
+#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
+#define PRS_REG_GRE_PROTOCOL 0x1f0734UL
+#define PRS_REG_VXLAN_PORT 0x1f0738UL
+#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
+#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
+
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0)
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT 0
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE (0x1 << 1)
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT 1
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE (0x1 << 2)
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2
+
+#define NIG_REG_VXLAN_PORT 0x50105cUL
+#define PBF_REG_VXLAN_PORT 0xd80518UL
+#define PBF_REG_NGE_PORT 0xd8051cUL
+#define PRS_REG_NGE_PORT 0x1f086cUL
+#define NIG_REG_NGE_PORT 0x508b38UL
+
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL
+
+#define NIG_REG_NGE_IP_ENABLE 0x508b28UL
+#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
+#define NIG_REG_NGE_COMP_VER 0x508b30UL
+#define PBF_REG_NGE_COMP_VER 0xd80524UL
+#define PRS_REG_NGE_COMP_VER 0x1f0878UL
+
+#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
+#define QM_REG_WFQVPWEIGHT 0x2fa000UL
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
new file mode 100644
index 0000000..a342bfe
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
@@ -0,0 +1,76 @@
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+
+int qed_selftest_memory(struct qed_dev *cdev)
+{
+ int rc = 0, i;
+
+ for_each_hwfn(cdev, i) {
+ rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+int qed_selftest_interrupt(struct qed_dev *cdev)
+{
+ int rc = 0, i;
+
+ for_each_hwfn(cdev, i) {
+ rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+int qed_selftest_register(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ int rc = 0, i;
+
+ /* although performed by MCP, this test is per engine */
+ for_each_hwfn(cdev, i) {
+ p_hwfn = &cdev->hwfns[i];
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "failed to acquire ptt\n");
+ return -EBUSY;
+ }
+ rc = qed_mcp_bist_register_test(p_hwfn, p_ptt);
+ qed_ptt_release(p_hwfn, p_ptt);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+int qed_selftest_clock(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ int rc = 0, i;
+
+ /* although performed by MCP, this test is per engine */
+ for_each_hwfn(cdev, i) {
+ p_hwfn = &cdev->hwfns[i];
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "failed to acquire ptt\n");
+ return -EBUSY;
+ }
+ rc = qed_mcp_bist_clock_test(p_hwfn, p_ptt);
+ qed_ptt_release(p_hwfn, p_ptt);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
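/* Illustrative sketch, not part of the patch: these selftest entry points
 * are consumed by the ethtool self-test path (qede_ethtool.c in the
 * diffstat). A caller holding a struct qed_dev could drive them roughly as
 * below; the result-buffer layout is an assumption for the example.
 */
static int example_run_selftests(struct qed_dev *cdev, u64 *buf)
{
	buf[0] = qed_selftest_register(cdev);	/* MCP BIST register test */
	buf[1] = qed_selftest_clock(cdev);	/* MCP BIST clock test */
	buf[2] = qed_selftest_interrupt(cdev);	/* heartbeat ramrod completion */
	buf[3] = qed_selftest_memory(cdev);	/* heartbeat ramrod completion */

	return (buf[0] || buf[1] || buf[2] || buf[3]) ? -EINVAL : 0;
}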
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
new file mode 100644
index 0000000..50eb0b49
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
@@ -0,0 +1,40 @@
+#ifndef _QED_SELFTEST_API_H
+#define _QED_SELFTEST_API_H
+#include <linux/types.h>
+
+/**
+ * @brief qed_selftest_memory - Perform memory test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_memory(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_interrupt - Perform interrupt test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_interrupt(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_register - Perform register test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_register(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_clock - Perform clock test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_clock(struct qed_dev *cdev);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index d39f914..ea4e9ce 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -52,6 +52,7 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
union ramrod_data {
struct pf_start_ramrod_data pf_start;
+ struct pf_update_ramrod_data pf_update;
struct rx_queue_start_ramrod_data rx_queue_start;
struct rx_queue_update_ramrod_data rx_queue_update;
struct rx_queue_stop_ramrod_data rx_queue_stop;
@@ -61,6 +62,9 @@ union ramrod_data {
struct vport_stop_ramrod_data vport_stop;
struct vport_update_ramrod_data vport_update;
struct vport_filter_update_ramrod_data vport_filter_update;
+
+ struct vf_start_ramrod_data vf_start;
+ struct vf_stop_ramrod_data vf_stop;
};
#define EQ_MAX_CREDIT 0xffffffff
@@ -338,13 +342,29 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
* to the internal RAM of the UStorm by the Function Start Ramrod.
*
* @param p_hwfn
+ * @param p_tunn
* @param mode
+ * @param allow_npar_tx_switch
*
* @return int
*/
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
- enum qed_mf_mode mode);
+ struct qed_tunn_start_params *p_tunn,
+ enum qed_mf_mode mode, bool allow_npar_tx_switch);
+
+/**
+ * @brief qed_sp_pf_update - PF Function Update Ramrod
+ *
+ * This ramrod updates function-related parameters. Every parameter can be
+ * updated independently, according to configuration flags.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+
+int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
/**
* @brief qed_sp_pf_stop - PF Function Stop Ramrod
@@ -362,4 +382,18 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
+int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_update_params *p_tunn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
+/**
+ * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+
+int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);
+
#endif
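/* Illustrative sketch, not part of the patch: with the extended prototypes
 * above, PF slow-path bring-up roughly follows the order below. The tunnel
 * parameters and NPAR flag are placeholders taken by value from the caller.
 */
static int example_pf_bringup(struct qed_hwfn *p_hwfn,
			      struct qed_tunn_start_params *p_tunn,
			      enum qed_mf_mode mode, bool npar_tx_switch)
{
	int rc;

	/* Start the PF, optionally with an initial tunnel configuration */
	rc = qed_sp_pf_start(p_hwfn, p_tunn, mode, npar_tx_switch);
	if (rc)
		return rc;

	/* Runtime changes (e.g. DCBx results) go through the update ramrod */
	return qed_sp_pf_update(p_hwfn);
}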
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 1c06c37..67f6ce3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -15,11 +15,13 @@
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
+#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
@@ -87,8 +89,218 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
return 0;
}
+static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
+{
+ switch (type) {
+ case QED_TUNN_CLSS_MAC_VLAN:
+ return TUNNEL_CLSS_MAC_VLAN;
+ case QED_TUNN_CLSS_MAC_VNI:
+ return TUNNEL_CLSS_MAC_VNI;
+ case QED_TUNN_CLSS_INNER_MAC_VLAN:
+ return TUNNEL_CLSS_INNER_MAC_VLAN;
+ case QED_TUNN_CLSS_INNER_MAC_VNI:
+ return TUNNEL_CLSS_INNER_MAC_VNI;
+ default:
+ return TUNNEL_CLSS_MAC_VLAN;
+ }
+}
+
+static void
+qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_update_params *p_src,
+ struct pf_update_tunnel_config *p_tunn_cfg)
+{
+ unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode;
+ unsigned long update_mask = p_src->tunn_mode_update_mask;
+ unsigned long tunn_mode = p_src->tunn_mode;
+ unsigned long new_tunn_mode = 0;
+
+ if (test_bit(QED_MODE_L2GRE_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_L2GRE_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
+ }
+
+ if (test_bit(QED_MODE_IPGRE_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_IPGRE_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
+ }
+
+ if (test_bit(QED_MODE_VXLAN_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_VXLAN_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
+ }
+
+ if (p_src->update_geneve_udp_port) {
+ p_tunn_cfg->set_geneve_udp_port_flg = 1;
+ p_tunn_cfg->geneve_udp_port =
+ cpu_to_le16(p_src->geneve_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+ }
+
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+ }
+
+ p_src->tunn_mode = new_tunn_mode;
+}
+
+static void
+qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_update_params *p_src,
+ struct pf_update_tunnel_config *p_tunn_cfg)
+{
+ unsigned long tunn_mode = p_src->tunn_mode;
+ enum tunnel_clss type;
+
+ qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
+ p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
+ p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+ p_tunn_cfg->tunnel_clss_vxlan = type;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+ p_tunn_cfg->tunnel_clss_l2gre = type;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+ p_tunn_cfg->tunnel_clss_ipgre = type;
+
+ if (p_src->update_vxlan_udp_port) {
+ p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+ p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2gre = 1;
+
+ if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgre = 1;
+
+ if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_vxlan = 1;
+
+ if (p_src->update_geneve_udp_port) {
+ p_tunn_cfg->set_geneve_udp_port_flg = 1;
+ p_tunn_cfg->geneve_udp_port =
+ cpu_to_le16(p_src->geneve_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2geneve = 1;
+
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+ p_tunn_cfg->tunnel_clss_l2geneve = type;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+ p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
+static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ unsigned long tunn_mode)
+{
+ u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
+ u8 l2geneve_enable = 0, ipgeneve_enable = 0;
+
+ if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+ l2gre_enable = 1;
+
+ if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+ ipgre_enable = 1;
+
+ if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+ vxlan_enable = 1;
+
+ qed_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
+ qed_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+ l2geneve_enable = 1;
+
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+ ipgeneve_enable = 1;
+
+ qed_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
+ ipgeneve_enable);
+}
+
+static void
+qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_start_params *p_src,
+ struct pf_start_tunnel_config *p_tunn_cfg)
+{
+ unsigned long tunn_mode;
+ enum tunnel_clss type;
+
+ if (!p_src)
+ return;
+
+ tunn_mode = p_src->tunn_mode;
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+ p_tunn_cfg->tunnel_clss_vxlan = type;
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+ p_tunn_cfg->tunnel_clss_l2gre = type;
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+ p_tunn_cfg->tunnel_clss_ipgre = type;
+
+ if (p_src->update_vxlan_udp_port) {
+ p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+ p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2gre = 1;
+
+ if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgre = 1;
+
+ if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_vxlan = 1;
+
+ if (p_src->update_geneve_udp_port) {
+ p_tunn_cfg->set_geneve_udp_port_flg = 1;
+ p_tunn_cfg->geneve_udp_port =
+ cpu_to_le16(p_src->geneve_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2geneve = 1;
+
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+ p_tunn_cfg->tunnel_clss_l2geneve = type;
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+ p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
- enum qed_mf_mode mode)
+ struct qed_tunn_start_params *p_tunn,
+ enum qed_mf_mode mode, bool allow_npar_tx_switch)
{
struct pf_start_ramrod_data *p_ramrod = NULL;
u16 sb = qed_int_get_sp_sb_id(p_hwfn);
@@ -143,16 +355,103 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
p_hwfn->p_consq->chain.pbl.p_phys_table);
+ qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
+ &p_ramrod->tunnel_config);
p_hwfn->hw_info.personality = PERSONALITY_ETH;
+ if (IS_MF_SI(p_hwfn))
+ p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
+
+ if (p_hwfn->cdev->p_iov_info) {
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+ p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
+ p_ramrod->num_vfs = (u8) p_iov->total_vfs;
+ }
+
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
sb, sb_index,
p_ramrod->outer_tag);
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ if (p_tunn) {
+ qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->tunn_mode);
+ p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
+ }
+
+ return rc;
+}
+
+int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
+ &p_ent->ramrod.pf_update);
+
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+/* Set pf update ramrod command params */
+int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_update_params *p_tunn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
+ &p_ent->ramrod.pf_update.tunnel_config);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ if (p_tunn->update_vxlan_udp_port)
+ qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->vxlan_udp_port);
+ if (p_tunn->update_geneve_udp_port)
+ qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->geneve_udp_port);
+
+ qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
+ p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
+
+ return rc;
+}
+
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
struct qed_spq_entry *p_ent = NULL;
@@ -173,3 +472,24 @@ int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+
+int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
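/* Illustrative sketch, not part of the patch: a caller enabling VXLAN at
 * runtime fills qed_tunn_update_params with the fields consumed by
 * qed_tunn_set_pf_update_params() above and posts the PF-update ramrod.
 * The classification choice here is a placeholder.
 */
static int example_enable_vxlan(struct qed_hwfn *p_hwfn, u16 udp_port)
{
	struct qed_tunn_update_params tunn;

	memset(&tunn, 0, sizeof(tunn));
	__set_bit(QED_MODE_VXLAN_TUNN, &tunn.tunn_mode);
	__set_bit(QED_MODE_VXLAN_TUNN, &tunn.tunn_mode_update_mask);
	tunn.update_vxlan_udp_port = 1;
	tunn.vxlan_udp_port = udp_port;
	tunn.update_rx_pf_clss = 1;
	tunn.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;

	return qed_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
					 QED_SPQ_MODE_EBLOCK, NULL);
}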
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 89469d5..acac662 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -27,6 +27,7 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
/***************************************************************************
* Structures & Definitions
@@ -242,10 +243,17 @@ static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
- DP_NOTICE(p_hwfn,
- "Unknown Async completion for protocol: %d\n",
- p_eqe->protocol_id);
- return -EINVAL;
+ switch (p_eqe->protocol_id) {
+ case PROTOCOLID_COMMON:
+ return qed_sriov_eqe_event(p_hwfn,
+ p_eqe->opcode,
+ p_eqe->echo, &p_eqe->data);
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unknown Async completion for protocol: %d\n",
+ p_eqe->protocol_id);
+ return -EINVAL;
+ }
}
/***************************************************************************
@@ -379,6 +387,9 @@ static int qed_cqe_completion(
struct eth_slow_path_rx_cqe *cqe,
enum protocol_type protocol)
{
+ if (IS_VF(p_hwfn->cdev))
+ return 0;
+
/* @@@tmp - it's possible we'll eventually want to handle some
* actual commands that can arrive here, but for now this is only
* used to complete the ramrod using the echo value on the cqe
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
new file mode 100644
index 0000000..c325ee8
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -0,0 +1,3613 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/qed/qed_iov_if.h>
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+/* IOV ramrods */
+static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
+ u32 concrete_vfid, u16 opaque_vfid)
+{
+ struct vf_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_vfid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_VF_START,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vf_start;
+
+ p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
+ p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid);
+
+ p_ramrod->personality = PERSONALITY_ETH;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
+ u32 concrete_vfid, u16 opaque_vfid)
+{
+ struct vf_stop_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_vfid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_VF_STOP,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vf_stop;
+
+ p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id, bool b_enabled_only)
+{
+ if (!p_hwfn->pf_iov_info) {
+ DP_NOTICE(p_hwfn->cdev, "No iov info\n");
+ return false;
+ }
+
+ if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
+ (rel_vf_id < 0))
+ return false;
+
+ if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
+ b_enabled_only)
+ return false;
+
+ return true;
+}
+
+static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
+{
+ struct qed_vf_info *vf = NULL;
+
+ if (!p_hwfn->pf_iov_info) {
+ DP_NOTICE(p_hwfn->cdev, "No iov info\n");
+ return NULL;
+ }
+
+ if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
+ vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
+ else
+ DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
+ relative_vf_id);
+
+ return vf;
+}
+
+int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
+ int vfid, struct qed_ptt *p_ptt)
+{
+ struct qed_bulletin_content *p_bulletin;
+ int crc_size = sizeof(p_bulletin->crc);
+ struct qed_dmae_params params;
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!p_vf)
+ return -EINVAL;
+
+ if (!p_vf->vf_bulletin)
+ return -EINVAL;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+
+ /* Increment bulletin board version and compute crc */
+ p_bulletin->version++;
+ p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
+ p_vf->bulletin.size - crc_size);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
+ p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
+
+ /* propagate bulletin board via dmae to vm memory */
+ memset(&params, 0, sizeof(params));
+ params.flags = QED_DMAE_FLAG_VF_DST;
+ params.dst_vfid = p_vf->abs_vf_id;
+ return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
+ p_vf->vf_bulletin, p_vf->bulletin.size / 4,
+ &params);
+}
+
+static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
+{
+ struct qed_hw_sriov_info *iov = cdev->p_iov_info;
+ int pos = iov->pos;
+
+ DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
+ pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);
+
+ pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
+ if (iov->num_vfs) {
+ DP_VERBOSE(cdev,
+ QED_MSG_IOV,
+ "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
+ iov->num_vfs = 0;
+ }
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
+
+ pci_read_config_dword(cdev->pdev,
+ pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
+
+ pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);
+
+ pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+
+ DP_VERBOSE(cdev,
+ QED_MSG_IOV,
+ "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
+ iov->nres,
+ iov->cap,
+ iov->ctrl,
+ iov->total_vfs,
+ iov->initial_vfs,
+ iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
+
+ /* Some sanity checks */
+ if (iov->num_vfs > NUM_OF_VFS(cdev) ||
+ iov->total_vfs > NUM_OF_VFS(cdev)) {
+ /* This can happen only due to a bug. In this case we set
+ * num_vfs to zero to avoid memory corruption in the code that
+ * assumes max number of vfs
+ */
+ DP_NOTICE(cdev,
+ "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
+ iov->num_vfs);
+
+ iov->num_vfs = 0;
+ iov->total_vfs = 0;
+ }
+
+ return 0;
+}
+
+static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_igu_block *p_sb;
+ u16 sb_id;
+ u32 val;
+
+ if (!p_hwfn->hw_info.p_igu_info) {
+ DP_ERR(p_hwfn,
+ "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
+ return;
+ }
+
+ for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
+ sb_id++) {
+ p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
+ if ((p_sb->status & QED_IGU_STATUS_FREE) &&
+ !(p_sb->status & QED_IGU_STATUS_PF)) {
+ val = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sb_id * 4);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+ qed_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
+ }
+ }
+}
+
+static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
+{
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+ struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+ struct qed_bulletin_content *p_bulletin_virt;
+ dma_addr_t req_p, rply_p, bulletin_p;
+ union pfvf_tlvs *p_reply_virt_addr;
+ union vfpf_tlvs *p_req_virt_addr;
+ u8 idx = 0;
+
+ memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
+
+ p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
+ req_p = p_iov_info->mbx_msg_phys_addr;
+ p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
+ rply_p = p_iov_info->mbx_reply_phys_addr;
+ p_bulletin_virt = p_iov_info->p_bulletins;
+ bulletin_p = p_iov_info->bulletins_phys;
+ if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
+ DP_ERR(p_hwfn,
+ "qed_iov_setup_vfdb called without allocating mem first\n");
+ return;
+ }
+
+ for (idx = 0; idx < p_iov->total_vfs; idx++) {
+ struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
+ u32 concrete;
+
+ vf->vf_mbx.req_virt = p_req_virt_addr + idx;
+ vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
+ vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
+ vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
+
+ vf->state = VF_STOPPED;
+ vf->b_init = false;
+
+ vf->bulletin.phys = idx *
+ sizeof(struct qed_bulletin_content) +
+ bulletin_p;
+ vf->bulletin.p_virt = p_bulletin_virt + idx;
+ vf->bulletin.size = sizeof(struct qed_bulletin_content);
+
+ vf->relative_vf_id = idx;
+ vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
+ concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
+ vf->concrete_fid = concrete;
+ vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
+ (vf->abs_vf_id << 8);
+ vf->vport_id = idx + 1;
+ }
+}
+
+static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
+{
+ struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+ void **p_v_addr;
+ u16 num_vfs = 0;
+
+ num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
+
+ /* Allocate PF Mailbox buffer (per-VF) */
+ p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
+ p_v_addr = &p_iov_info->mbx_msg_virt_addr;
+ *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_msg_size,
+ &p_iov_info->mbx_msg_phys_addr,
+ GFP_KERNEL);
+ if (!*p_v_addr)
+ return -ENOMEM;
+
+ /* Allocate PF Mailbox Reply buffer (per-VF) */
+ p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
+ p_v_addr = &p_iov_info->mbx_reply_virt_addr;
+ *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_reply_size,
+ &p_iov_info->mbx_reply_phys_addr,
+ GFP_KERNEL);
+ if (!*p_v_addr)
+ return -ENOMEM;
+
+ p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
+ num_vfs;
+ p_v_addr = &p_iov_info->p_bulletins;
+ *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->bulletins_size,
+ &p_iov_info->bulletins_phys,
+ GFP_KERNEL);
+ if (!*p_v_addr)
+ return -ENOMEM;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
+ p_iov_info->mbx_msg_virt_addr,
+ (u64) p_iov_info->mbx_msg_phys_addr,
+ p_iov_info->mbx_reply_virt_addr,
+ (u64) p_iov_info->mbx_reply_phys_addr,
+ p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
+
+ return 0;
+}
+
+static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
+{
+ struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+
+ if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_msg_size,
+ p_iov_info->mbx_msg_virt_addr,
+ p_iov_info->mbx_msg_phys_addr);
+
+ if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_reply_size,
+ p_iov_info->mbx_reply_virt_addr,
+ p_iov_info->mbx_reply_phys_addr);
+
+ if (p_iov_info->p_bulletins)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->bulletins_size,
+ p_iov_info->p_bulletins,
+ p_iov_info->bulletins_phys);
+}
+
+int qed_iov_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_pf_iov *p_sriov;
+
+ if (!IS_PF_SRIOV(p_hwfn)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "No SR-IOV - no need for IOV db\n");
+ return 0;
+ }
+
+ p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
+ if (!p_sriov) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
+ return -ENOMEM;
+ }
+
+ p_hwfn->pf_iov_info = p_sriov;
+
+ return qed_iov_allocate_vfdb(p_hwfn);
+}
+
+void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
+ return;
+
+ qed_iov_setup_vfdb(p_hwfn);
+ qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
+}
+
+void qed_iov_free(struct qed_hwfn *p_hwfn)
+{
+ if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
+ qed_iov_free_vfdb(p_hwfn);
+ kfree(p_hwfn->pf_iov_info);
+ }
+}
+
+void qed_iov_free_hw_info(struct qed_dev *cdev)
+{
+ kfree(cdev->p_iov_info);
+ cdev->p_iov_info = NULL;
+}
+
+int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ int pos;
+ int rc;
+
+ if (IS_VF(p_hwfn->cdev))
+ return 0;
+
+ /* Learn the PCI configuration */
+ pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
+ PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
+ return 0;
+ }
+
+ /* Allocate a new struct for IOV information */
+ cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
+ if (!cdev->p_iov_info) {
+ DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
+ return -ENOMEM;
+ }
+ cdev->p_iov_info->pos = pos;
+
+ rc = qed_iov_pci_cfg_info(cdev);
+ if (rc)
+ return rc;
+
+ /* We want PF IOV to be synonymous with the existence of p_iov_info;
+ * In case the capability is published but there are no VFs, simply
+ * de-allocate the struct.
+ */
+ if (!cdev->p_iov_info->total_vfs) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "IOV capabilities, but no VFs are published\n");
+ kfree(cdev->p_iov_info);
+ cdev->p_iov_info = NULL;
+ return 0;
+ }
+
+ /* Calculate the first VF index - this is a bit tricky; Basically,
+ * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
+ * after the first engine's VFs.
+ */
+ cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 16;
+ if (QED_PATH_ID(p_hwfn))
+ cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "First VF in hwfn 0x%08x\n",
+ cdev->p_iov_info->first_vf_in_pf);
+
+ return 0;
+}
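/* Worked example for the first_vf_in_pf arithmetic above; the VF offset of
 * 16 and abs_pf_id of 2 are hypothetical values, not read from hardware.
 */
static u16 example_first_vf_in_pf(void)
{
	u16 vf_offset = 16;	/* PCI_SRIOV_VF_OFFSET from config space */
	u16 abs_pf_id = 2;	/* PF 2 on the first engine */

	/* VFs occupy PCI functions starting at 16, so this PF's VFs begin
	 * at engine-relative VF index 16 + 2 - 16 = 2. A second-engine PF
	 * would additionally subtract MAX_NUM_VFS_BB.
	 */
	return vf_offset + abs_pf_id - 16;
}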
+
+static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
+{
+ /* Check PF supports sriov */
+ if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
+ !IS_PF_SRIOV_ALLOC(p_hwfn))
+ return false;
+
+ /* Check VF validity */
+ if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
+ return false;
+
+ return true;
+}
+
+static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
+ u16 rel_vf_id, u8 to_disable)
+{
+ struct qed_vf_info *vf;
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!vf)
+ continue;
+
+ vf->to_disable = to_disable;
+ }
+}
+
+void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
+{
+ u16 i;
+
+ if (!IS_QED_SRIOV(cdev))
+ return;
+
+ for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
+ qed_iov_set_vf_to_disable(cdev, i, to_disable);
+}
+
+static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 abs_vfid)
+{
+ qed_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
+ 1 << (abs_vfid & 0x1f));
+}
+
+static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_vf_info *vf)
+{
+ int i;
+
+ /* Set VF masks and configuration - pretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
+
+ /* unpretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+ /* iterate over all queues, clear sb consumer */
+ for (i = 0; i < vf->num_sbs; i++)
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ vf->igu_sbs[i],
+ vf->opaque_fid, true);
+}
+
+static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf, bool enable)
+{
+ u32 igu_vf_conf;
+
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+ igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
+
+ if (enable)
+ igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
+ else
+ igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
+
+ /* unpretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+}
+
+static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
+ int rc;
+
+ if (vf->to_disable)
+ return 0;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "Enable internal access for vf %x [abs %x]\n",
+ vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));
+
+ qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));
+
+ qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+ rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
+ if (rc)
+ return rc;
+
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+ SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
+ STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
+
+ qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
+ p_hwfn->hw_info.hw_mode);
+
+ /* unpretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+ if (vf->state != VF_STOPPED) {
+ DP_NOTICE(p_hwfn, "VF[%02x] is already started\n",
+ vf->abs_vf_id);
+ return -EINVAL;
+ }
+
+ /* Start VF */
+ rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
+
+ vf->state = VF_FREE;
+
+ return rc;
+}
+
+/**
+ * @brief qed_iov_config_perm_table - configure the permission
+ * zone table.
+ * In E4, queue zone permission table size is 320x9. There
+ * are 320 VF queues for single engine device (256 for dual
+ * engine device), and each entry has the following format:
+ * {Valid, VF[7:0]}
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf
+ * @param enable
+ */
+static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf, u8 enable)
+{
+ u32 reg_addr, val;
+ u16 qzone_id = 0;
+ int qid;
+
+ for (qid = 0; qid < vf->num_rxqs; qid++) {
+ qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
+ &qzone_id);
+
+ reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
+ val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
+ qed_wr(p_hwfn, p_ptt, reg_addr, val);
+ }
+}
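/* Worked example of the {Valid, VF[7:0]} entry format described above:
 * enabling queue-zone access for a hypothetical absolute VF id 5 writes
 * 0x105 (bit 8 is the valid bit, the low byte carries the VF id), while
 * disabling writes 0.
 */
static u32 example_perm_table_entry(u8 abs_vf_id, bool enable)
{
	return enable ? (abs_vf_id | (1 << 8)) : 0;	/* 5 -> 0x105 */
}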
+
+static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ /* Reset vf in IGU - interrupts are still disabled */
+ qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+ qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
+
+ /* Permission Table */
+ qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
+}
+
+static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf, u16 num_rx_queues)
+{
+ struct qed_igu_block *igu_blocks;
+ int qid = 0, igu_id = 0;
+ u32 val = 0;
+
+ igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
+
+ if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
+ num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
+ p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
+
+ SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
+ SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
+
+ while ((qid < num_rx_queues) &&
+ (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
+ if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
+ struct cau_sb_entry sb_entry;
+
+ vf->igu_sbs[qid] = (u16)igu_id;
+ igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;
+
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
+
+ qed_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
+ val);
+
+ /* Configure the igu sb in CAU which was marked valid */
+ qed_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_hwfn->rel_pf_id,
+ vf->abs_vf_id, 1);
+ qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ igu_id * sizeof(u64), 2, 0);
+ qid++;
+ }
+ igu_id++;
+ }
+
+ vf->num_sbs = (u8) num_rx_queues;
+
+ return vf->num_sbs;
+}
+
+static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ int idx, igu_id;
+ u32 addr, val;
+
+ /* Invalidate igu CAM lines and mark them as free */
+ for (idx = 0; idx < vf->num_sbs; idx++) {
+ igu_id = vf->igu_sbs[idx];
+ addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
+
+ val = qed_rd(p_hwfn, p_ptt, addr);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+ qed_wr(p_hwfn, p_ptt, addr, val);
+
+ p_info->igu_map.igu_blocks[igu_id].status |=
+ QED_IGU_STATUS_FREE;
+
+ p_hwfn->hw_info.p_igu_info->free_blks++;
+ }
+
+ vf->num_sbs = 0;
+}
+
+static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 rel_vf_id, u16 num_rx_queues)
+{
+ u8 num_of_vf_avaiable_chains = 0;
+ struct qed_vf_info *vf = NULL;
+ int rc = 0;
+ u32 cids;
+ u8 i;
+
+ vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!vf) {
+ DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
+ return -EINVAL;
+ }
+
+ if (vf->b_init) {
+ DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
+ return -EINVAL;
+ }
+
+ /* Limit number of queues according to number of CIDs */
+ qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
+ vf->relative_vf_id, num_rx_queues, (u16) cids);
+ num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));
+
+ num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
+ p_ptt,
+ vf,
+ num_rx_queues);
+ if (!num_of_vf_avaiable_chains) {
+ DP_ERR(p_hwfn, "no available igu sbs\n");
+ return -ENOMEM;
+ }
+
+ /* Choose queue number and index ranges */
+ vf->num_rxqs = num_of_vf_avaiable_chains;
+ vf->num_txqs = num_of_vf_avaiable_chains;
+
+ for (i = 0; i < vf->num_rxqs; i++) {
+ u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
+ vf->igu_sbs[i]);
+
+ if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
+ DP_NOTICE(p_hwfn,
+ "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
+ vf->relative_vf_id, queue_id);
+ return -EINVAL;
+ }
+
+ /* CIDs are per-VF, so no problem having them 0-based. */
+ vf->vf_queues[i].fw_rx_qid = queue_id;
+ vf->vf_queues[i].fw_tx_qid = queue_id;
+ vf->vf_queues[i].fw_cid = i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
+ vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
+ }
+ rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
+ if (!rc) {
+ vf->b_init = true;
+
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->cdev->p_iov_info->num_vfs++;
+ }
+
+ return rc;
+}
+
+static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
+ u16 vfid,
+ struct qed_mcp_link_params *params,
+ struct qed_mcp_link_state *link,
+ struct qed_mcp_link_capabilities *p_caps)
+{
+ struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
+ vfid,
+ false);
+ struct qed_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+ p_bulletin->req_autoneg = params->speed.autoneg;
+ p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+ p_bulletin->req_forced_speed = params->speed.forced_speed;
+ p_bulletin->req_autoneg_pause = params->pause.autoneg;
+ p_bulletin->req_forced_rx = params->pause.forced_rx;
+ p_bulletin->req_forced_tx = params->pause.forced_tx;
+ p_bulletin->req_loopback = params->loopback_mode;
+
+ p_bulletin->link_up = link->link_up;
+ p_bulletin->speed = link->speed;
+ p_bulletin->full_duplex = link->full_duplex;
+ p_bulletin->autoneg = link->an;
+ p_bulletin->autoneg_complete = link->an_complete;
+ p_bulletin->parallel_detection = link->parallel_detection;
+ p_bulletin->pfc_enabled = link->pfc_enabled;
+ p_bulletin->partner_adv_speed = link->partner_adv_speed;
+ p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+ p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+ p_bulletin->partner_adv_pause = link->partner_adv_pause;
+ p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+ p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
+static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 rel_vf_id)
+{
+ struct qed_mcp_link_capabilities caps;
+ struct qed_mcp_link_params params;
+ struct qed_mcp_link_state link;
+ struct qed_vf_info *vf = NULL;
+ int rc = 0;
+
+ vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!vf) {
+ DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
+ return -EINVAL;
+ }
+
+ if (vf->bulletin.p_virt)
+ memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));
+
+ memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
+
+ /* Get the link configuration back in bulletin so
+ * that when VFs are re-enabled they get the actual
+ * link configuration.
+ */
+ memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
+ memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
+ memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
+ qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
+
+ if (vf->state != VF_STOPPED) {
+ /* Stopping the VF */
+ rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);
+
+ if (rc != 0) {
+ DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
+ rc);
+ return rc;
+ }
+
+ vf->state = VF_STOPPED;
+ }
+
+ /* Disabling interrupts and resetting the permission table were done during
+ * vf-close; however, we could get here without going through vf_close
+ */
+ /* Disable Interrupts for VF */
+ qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+ /* Reset Permission table */
+ qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+ vf->num_rxqs = 0;
+ vf->num_txqs = 0;
+ qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
+
+ if (vf->b_init) {
+ vf->b_init = false;
+
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->cdev->p_iov_info->num_vfs--;
+ }
+
+ return 0;
+}
+
+static bool qed_iov_tlv_supported(u16 tlvtype)
+{
+ return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
+}
+
+/* place a given tlv on the tlv buffer, continuing current tlv list */
+void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
+{
+ struct channel_tlv *tl = (struct channel_tlv *)*offset;
+
+ tl->type = type;
+ tl->length = length;
+
+ /* Offset should keep pointing to next TLV (the end of the last) */
+ *offset += length;
+
+ /* Return a pointer to the start of the added tlv */
+ return *offset - length;
+}
+
+/* list the types and lengths of the tlvs on the buffer */
+void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
+{
+ u16 i = 1, total_length = 0;
+ struct channel_tlv *tlv;
+
+ do {
+ tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
+
+ /* output tlv */
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "TLV number %d: type %d, length %d\n",
+ i, tlv->type, tlv->length);
+
+ if (tlv->type == CHANNEL_TLV_LIST_END)
+ return;
+
+ /* Validate entry - protect against malicious VFs */
+ if (!tlv->length) {
+ DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
+ return;
+ }
+
+ total_length += tlv->length;
+
+ if (total_length >= sizeof(struct tlv_buffer_size)) {
+ DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
+ return;
+ }
+
+ i++;
+ } while (1);
+}
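/* Illustrative sketch, not part of the patch: qed_add_tlv() is called
 * repeatedly over a single buffer, each call returning the freshly placed
 * TLV and advancing the offset, with CHANNEL_TLV_LIST_END terminating the
 * chain. The ACQUIRE request below is only an example; the real VF-side
 * flow lives in qed_vf.c from this same series.
 */
static void example_build_tlv_chain(struct qed_hwfn *p_hwfn, u8 *buf)
{
	struct vfpf_acquire_tlv *req;
	u8 *offset = buf;

	req = qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	/* ... request-specific fields of 'req' would be filled here ... */

	qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_dp_tlv_list(p_hwfn, buf);	/* dump the list that was built */
}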
+
+static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf,
+ u16 length, u8 status)
+{
+ struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct qed_dmae_params params;
+ u8 eng_vf_id;
+
+ mbx->reply_virt->default_resp.hdr.status = status;
+
+ qed_dp_tlv_list(p_hwfn, mbx->reply_virt);
+
+ eng_vf_id = p_vf->abs_vf_id;
+
+ memset(&params, 0, sizeof(struct qed_dmae_params));
+ params.flags = QED_DMAE_FLAG_VF_DST;
+ params.dst_vfid = eng_vf_id;
+
+ qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
+ mbx->req_virt->first_tlv.reply_address +
+ sizeof(u64),
+ (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
+ &params);
+
+ qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+ mbx->req_virt->first_tlv.reply_address,
+ sizeof(u64) / 4, &params);
+
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+}
+
+static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
+ enum qed_iov_vport_update_flag flag)
+{
+ switch (flag) {
+ case QED_IOV_VP_UPDATE_ACTIVATE:
+ return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+ case QED_IOV_VP_UPDATE_VLAN_STRIP:
+ return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+ case QED_IOV_VP_UPDATE_TX_SWITCH:
+ return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+ case QED_IOV_VP_UPDATE_MCAST:
+ return CHANNEL_TLV_VPORT_UPDATE_MCAST;
+ case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
+ return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+ case QED_IOV_VP_UPDATE_RSS:
+ return CHANNEL_TLV_VPORT_UPDATE_RSS;
+ case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
+ return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+ case QED_IOV_VP_UPDATE_SGE_TPA:
+ return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+ default:
+ return 0;
+ }
+}
+
+static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_iov_vf_mbx *p_mbx,
+ u8 status,
+ u16 tlvs_mask, u16 tlvs_accepted)
+{
+ struct pfvf_def_resp_tlv *resp;
+ u16 size, total_len, i;
+
+ memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
+ p_mbx->offset = (u8 *)p_mbx->reply_virt;
+ size = sizeof(struct pfvf_def_resp_tlv);
+ total_len = size;
+
+ qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
+
+ /* Prepare response for all extended tlvs if they are found by PF */
+ for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
+ if (!(tlvs_mask & (1 << i)))
+ continue;
+
+ resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
+ qed_iov_vport_to_tlv(p_hwfn, i), size);
+
+ if (tlvs_accepted & (1 << i))
+ resp->hdr.status = status;
+ else
+ resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] - vport_update response: TLV %d, status %02x\n",
+ p_vf->relative_vf_id,
+ qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
+
+ total_len += size;
+ }
+
+ qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ return total_len;
+}
+
+static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf_info,
+ u16 type, u16 length, u8 status)
+{
+ struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ qed_add_tlv(p_hwfn, &mbx->offset, type, length);
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
+}
+
+struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
+{
+ struct qed_vf_info *vf = NULL;
+
+ vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
+ if (!vf)
+ return NULL;
+
+ return &vf->p_vf_info;
+}
+
+void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
+
+ if (!vf_info)
+ return;
+
+ /* Clear the VF mac */
+ memset(vf_info->mac, 0, ETH_ALEN);
+}
+
+static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf)
+{
+ u32 i;
+
+ p_vf->vf_bulletin = 0;
+ p_vf->vport_instance = 0;
+ p_vf->num_mac_filters = 0;
+ p_vf->num_vlan_filters = 0;
+ p_vf->configured_features = 0;
+
+ /* If VF previously requested less resources, go back to default */
+ p_vf->num_rxqs = p_vf->num_sbs;
+ p_vf->num_txqs = p_vf->num_sbs;
+
+ p_vf->num_active_rxqs = 0;
+
+ for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
+ p_vf->vf_queues[i].rxq_active = 0;
+
+ memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
+ qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
+}
+
+static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
+ struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+ struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
+ u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
+ struct pf_vf_resc *resc = &resp->resc;
+
+ /* Validate FW compatibility */
+ if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
+ req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
+ req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
+ req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
+ DP_INFO(p_hwfn,
+ "VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n",
+ vf->abs_vf_id,
+ req->vfdev_info.fw_major,
+ req->vfdev_info.fw_minor,
+ req->vfdev_info.fw_revision,
+ req->vfdev_info.fw_engineering,
+ FW_MAJOR_VERSION,
+ FW_MINOR_VERSION,
+ FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
+ vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* On 100g PFs, prevent old VFs from loading */
+ if ((p_hwfn->cdev->num_hwfns > 1) &&
+ !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
+ DP_INFO(p_hwfn,
+ "VF[%d] is running an old driver that doesn't support 100g\n",
+ vf->abs_vf_id);
+ vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ memset(resp, 0, sizeof(*resp));
+
+ /* Fill in vf info stuff */
+ vf->opaque_fid = req->vfdev_info.opaque_fid;
+ vf->num_mac_filters = 1;
+ vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+
+ vf->vf_bulletin = req->bulletin_addr;
+ vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
+ vf->bulletin.size : req->bulletin_size;
+
+ /* fill in pfdev info */
+ pfdev_info->chip_num = p_hwfn->cdev->chip_num;
+ pfdev_info->db_size = 0;
+ pfdev_info->indices_per_sb = PIS_PER_SB;
+
+ pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
+ PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
+ if (p_hwfn->cdev->num_hwfns > 1)
+ pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
+
+ pfdev_info->stats_info.mstats.address =
+ PXP_VF_BAR0_START_MSDM_ZONE_B +
+ offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
+ pfdev_info->stats_info.mstats.len =
+ sizeof(struct eth_mstorm_per_queue_stat);
+
+ pfdev_info->stats_info.ustats.address =
+ PXP_VF_BAR0_START_USDM_ZONE_B +
+ offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
+ pfdev_info->stats_info.ustats.len =
+ sizeof(struct eth_ustorm_per_queue_stat);
+
+ pfdev_info->stats_info.pstats.address =
+ PXP_VF_BAR0_START_PSDM_ZONE_B +
+ offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
+ pfdev_info->stats_info.pstats.len =
+ sizeof(struct eth_pstorm_per_queue_stat);
+
+ pfdev_info->stats_info.tstats.address = 0;
+ pfdev_info->stats_info.tstats.len = 0;
+
+ memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
+
+ pfdev_info->fw_major = FW_MAJOR_VERSION;
+ pfdev_info->fw_minor = FW_MINOR_VERSION;
+ pfdev_info->fw_rev = FW_REVISION_VERSION;
+ pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
+ pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
+ qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
+
+ pfdev_info->dev_type = p_hwfn->cdev->type;
+ pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
+
+ resc->num_rxqs = vf->num_rxqs;
+ resc->num_txqs = vf->num_txqs;
+ resc->num_sbs = vf->num_sbs;
+ for (i = 0; i < resc->num_sbs; i++) {
+ resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
+ resc->hw_sbs[i].sb_qid = 0;
+ }
+
+ for (i = 0; i < resc->num_rxqs; i++) {
+ qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
+ (u16 *)&resc->hw_qid[i]);
+ resc->cid[i] = vf->vf_queues[i].fw_cid;
+ }
+
+ resc->num_mac_filters = min_t(u8, vf->num_mac_filters,
+ req->resc_request.num_mac_filters);
+ resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters,
+ req->resc_request.num_vlan_filters);
+
+ /* This isn't really required as VF isn't limited, but some VFs might
+ * actually test this value, so need to provide it.
+ */
+ resc->num_mc_filters = req->resc_request.num_mc_filters;
+
+ /* Fill agreed size of bulletin board in response */
+ resp->bulletin_size = vf->bulletin.size;
+ qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
+ "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
+ vf->abs_vf_id,
+ resp->pfdev_info.chip_num,
+ resp->pfdev_info.db_size,
+ resp->pfdev_info.indices_per_sb,
+ resp->pfdev_info.capabilities,
+ resc->num_rxqs,
+ resc->num_txqs,
+ resc->num_sbs,
+ resc->num_mac_filters,
+ resc->num_vlan_filters);
+ vf->state = VF_ACQUIRED;
+
+ /* Prepare Response */
+out:
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
+ sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
+}
+
+static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, bool val)
+{
+ struct qed_sp_vport_update_params params;
+ int rc;
+
+ if (val == p_vf->spoof_chk) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Spoofchk value[%d] is already configured\n", val);
+ return 0;
+ }
+
+ memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
+ params.opaque_fid = p_vf->opaque_fid;
+ params.vport_id = p_vf->vport_id;
+ params.update_anti_spoofing_en_flg = 1;
+ params.anti_spoofing_en = val;
+
+ rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
+ if (!rc) {
+ p_vf->spoof_chk = val;
+ p_vf->req_spoofchk_val = p_vf->spoof_chk;
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Spoofchk val[%d] configured\n", val);
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Spoofchk configuration[val:%d] failed for VF[%d]\n",
+ val, p_vf->relative_vf_id);
+ }
+
+ return rc;
+}
+
+static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf)
+{
+ struct qed_filter_ucast filter;
+ int rc = 0;
+ int i;
+
+ memset(&filter, 0, sizeof(filter));
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ filter.opcode = QED_FILTER_ADD;
+
+ /* Reconfigure vlans */
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+ if (!p_vf->shadow_config.vlans[i].used)
+ continue;
+
+ filter.type = QED_FILTER_VLAN;
+ filter.vlan = p_vf->shadow_config.vlans[i].vid;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
+ filter.vlan, p_vf->relative_vf_id);
+ rc = qed_sp_eth_filter_ucast(p_hwfn,
+ p_vf->opaque_fid,
+ &filter,
+ QED_SPQ_MODE_CB, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to configure VLAN [%04x] to VF [%04x]\n",
+ filter.vlan, p_vf->relative_vf_id);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int
+qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u64 events)
+{
+ int rc = 0;
+
+ if ((events & (1 << VLAN_ADDR_FORCED)) &&
+ !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
+ rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
+
+ return rc;
+}
+
+static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u64 events)
+{
+ int rc = 0;
+ struct qed_filter_ucast filter;
+
+ if (!p_vf->vport_instance)
+ return -EINVAL;
+
+ if (events & (1 << MAC_ADDR_FORCED)) {
+ /* Since there's no way [currently] of removing the MAC,
+ * we can always assume this means we need to force it.
+ */
+ memset(&filter, 0, sizeof(filter));
+ filter.type = QED_FILTER_MAC;
+ filter.opcode = QED_FILTER_REPLACE;
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);
+
+ rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, QED_SPQ_MODE_CB, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "PF failed to configure MAC for VF\n");
+ return rc;
+ }
+
+ p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+ }
+
+ if (events & (1 << VLAN_ADDR_FORCED)) {
+ struct qed_sp_vport_update_params vport_update;
+ u8 removal;
+ int i;
+
+ memset(&filter, 0, sizeof(filter));
+ filter.type = QED_FILTER_VLAN;
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ filter.vlan = p_vf->bulletin.p_virt->pvid;
+ filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
+ QED_FILTER_FLUSH;
+
+ /* Send the ramrod */
+ rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, QED_SPQ_MODE_CB, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "PF failed to configure VLAN for VF\n");
+ return rc;
+ }
+
+ /* Update the default-vlan & silent vlan stripping */
+ memset(&vport_update, 0, sizeof(vport_update));
+ vport_update.opaque_fid = p_vf->opaque_fid;
+ vport_update.vport_id = p_vf->vport_id;
+ vport_update.update_default_vlan_enable_flg = 1;
+ vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
+ vport_update.update_default_vlan_flg = 1;
+ vport_update.default_vlan = filter.vlan;
+
+ vport_update.update_inner_vlan_removal_flg = 1;
+ removal = filter.vlan ? 1
+ : p_vf->shadow_config.inner_vlan_removal;
+ vport_update.inner_vlan_removal_flg = removal;
+ vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
+ rc = qed_sp_vport_update(p_hwfn,
+ &vport_update,
+ QED_SPQ_MODE_EBLOCK, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "PF failed to configure VF vport for vlan\n");
+ return rc;
+ }
+
+ /* Update all the Rx queues */
+ for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
+ u16 qid;
+
+ if (!p_vf->vf_queues[i].rxq_active)
+ continue;
+
+ qid = p_vf->vf_queues[i].fw_rx_qid;
+
+ rc = qed_sp_eth_rx_queues_update(p_hwfn, qid,
+ 1, 0, 1,
+ QED_SPQ_MODE_EBLOCK,
+ NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send Rx update fo queue[0x%04x]\n",
+ qid);
+ return rc;
+ }
+ }
+
+ if (filter.vlan)
+ p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
+ else
+ p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
+ }
+
+ /* If forced features are terminated, we need to configure the shadow
+ * configuration back again.
+ */
+ if (events)
+ qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
+
+ return rc;
+}
+
+static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_sp_vport_start_params params = { 0 };
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_vport_start_tlv *start;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct qed_vf_info *vf_info;
+ u64 *p_bitmap;
+ int sb_id;
+ int rc;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Failed to get VF info, invalid vfid [%d]\n",
+ vf->relative_vf_id);
+ return;
+ }
+
+ vf->state = VF_ENABLED;
+ start = &mbx->req_virt->start_vport;
+
+ /* Initialize Status block in CAU */
+ for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
+ if (!start->sb_addr[sb_id]) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] did not fill the address of SB %d\n",
+ vf->relative_vf_id, sb_id);
+ break;
+ }
+
+ qed_int_cau_conf_sb(p_hwfn, p_ptt,
+ start->sb_addr[sb_id],
+ vf->igu_sbs[sb_id],
+ vf->abs_vf_id, 1);
+ }
+ qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
+ vf->mtu = start->mtu;
+ vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
+
+ /* Take into consideration configuration forced by the hypervisor;
+ * if none is configured, use the values supplied by the VF [old
+ * VFs are still fine, since they passed '0' as padding].
+ */
+ p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
+ if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
+ u8 vf_req = start->only_untagged;
+
+ vf_info->bulletin.p_virt->default_only_untagged = vf_req;
+ *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
+ }
+
+ params.tpa_mode = start->tpa_mode;
+ params.remove_inner_vlan = start->inner_vlan_removal;
+ params.tx_switching = true;
+
+ params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
+ params.drop_ttl0 = false;
+ params.concrete_fid = vf->concrete_fid;
+ params.opaque_fid = vf->opaque_fid;
+ params.vport_id = vf->vport_id;
+ params.max_buffers_per_cqe = start->max_buffers_per_cqe;
+ params.mtu = vf->mtu;
+
+ rc = qed_sp_eth_vport_start(p_hwfn, &params);
+ if (rc != 0) {
+ DP_ERR(p_hwfn,
+ "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
+ status = PFVF_STATUS_FAILURE;
+ } else {
+ vf->vport_instance++;
+
+ /* Force configuration if needed on the newly opened vport */
+ qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
+
+ __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
+ }
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u8 status = PFVF_STATUS_SUCCESS;
+ int rc;
+
+ vf->vport_instance--;
+ vf->spoof_chk = false;
+
+ rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
+ if (rc != 0) {
+ DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
+ rc);
+ status = PFVF_STATUS_FAILURE;
+ }
+
+ /* Forget the configuration on the vport */
+ vf->configured_features = 0;
+ memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
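+/* The Rx producer address returned to the VF lies in the Mstorm queue zone,
+ * which follows the Tstorm zones of all L2 queues in the VF BAR.
+ */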
+#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
+#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
+ (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
+
+static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf, u8 status)
+{
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct pfvf_start_queue_resp_tlv *p_tlv;
+ struct vfpf_start_rxq_tlv *req;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
+ sizeof(*p_tlv));
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* Update the TLV with the response */
+ if (status == PFVF_STATUS_SUCCESS) {
+ u16 hw_qid = 0;
+
+ req = &mbx->req_virt->start_rxq;
+ qed_fw_l2_queue(p_hwfn, vf->vf_queues[req->rx_qid].fw_rx_qid,
+ &hw_qid);
+
+ p_tlv->offset = MSTORM_QZONE_START(p_hwfn->cdev) +
+ hw_qid * MSTORM_QZONE_SIZE +
+ offsetof(struct mstorm_eth_queue_zone,
+ rx_producers);
+ }
+
+ qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
+}
+
+static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_queue_start_common_params params;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct vfpf_start_rxq_tlv *req;
+ int rc;
+
+ memset(&params, 0, sizeof(params));
+ req = &mbx->req_virt->start_rxq;
+ params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
+ params.vport_id = vf->vport_id;
+ params.sb = req->hw_sb;
+ params.sb_idx = req->sb_index;
+
+ rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
+ vf->vf_queues[req->rx_qid].fw_cid,
+ &params,
+ vf->abs_vf_id + 0x10,
+ req->bd_max_bytes,
+ req->rxq_addr,
+ req->cqe_pbl_addr, req->cqe_pbl_size);
+
+ if (rc) {
+ status = PFVF_STATUS_FAILURE;
+ } else {
+ vf->vf_queues[req->rx_qid].rxq_active = true;
+ vf->num_active_rxqs++;
+ }
+
+ qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
+}
+
+static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct qed_queue_start_common_params params;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ union qed_qm_pq_params pq_params;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct vfpf_start_txq_tlv *req;
+ int rc;
+
+ /* Prepare the parameters which would choose the right PQ */
+ memset(&pq_params, 0, sizeof(pq_params));
+ pq_params.eth.is_vf = 1;
+ pq_params.eth.vf_id = vf->relative_vf_id;
+
+ memset(&params, 0, sizeof(params));
+ req = &mbx->req_virt->start_txq;
+ params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
+ params.vport_id = vf->vport_id;
+ params.sb = req->hw_sb;
+ params.sb_idx = req->sb_index;
+
+ rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
+ vf->opaque_fid,
+ vf->vf_queues[req->tx_qid].fw_cid,
+ &params,
+ vf->abs_vf_id + 0x10,
+ req->pbl_addr,
+ req->pbl_size, &pq_params);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+ else
+ vf->vf_queues[req->tx_qid].txq_active = true;
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
+ length, status);
+}
+
+static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf,
+ u16 rxq_id, u8 num_rxqs, bool cqe_completion)
+{
+ int rc = 0;
+ int qid;
+
+ if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
+ return -EINVAL;
+
+ for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
+ if (vf->vf_queues[qid].rxq_active) {
+ rc = qed_sp_eth_rx_queue_stop(p_hwfn,
+ vf->vf_queues[qid].
+ fw_rx_qid, false,
+ cqe_completion);
+
+ if (rc)
+ return rc;
+ }
+ vf->vf_queues[qid].rxq_active = false;
+ vf->num_active_rxqs--;
+ }
+
+ return rc;
+}
+
+static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
+{
+ int rc = 0;
+ int qid;
+
+ if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
+ return -EINVAL;
+
+ for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
+ if (vf->vf_queues[qid].txq_active) {
+ rc = qed_sp_eth_tx_queue_stop(p_hwfn,
+ vf->vf_queues[qid].
+ fw_tx_qid);
+
+ if (rc)
+ return rc;
+ }
+ vf->vf_queues[qid].txq_active = false;
+ }
+ return rc;
+}
+
+static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct vfpf_stop_rxqs_tlv *req;
+ int rc;
+
+ /* We give the option of starting from qid != 0; in this case we
+ * need to make sure that qid + num_qs doesn't exceed the actual
+ * number of queues that exist.
+ */
+ req = &mbx->req_virt->stop_rxqs;
+ rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+ req->num_rxqs, req->cqe_completion);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
+ length, status);
+}
+
+static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct vfpf_stop_txqs_tlv *req;
+ int rc;
+
+ /* We give the option of starting from qid != 0; in this case we
+ * need to make sure that qid + num_qs doesn't exceed the actual
+ * number of queues that exist.
+ */
+ req = &mbx->req_virt->stop_txqs;
+ rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
+ length, status);
+}
+
+static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_update_rxq_tlv *req;
+ u8 status = PFVF_STATUS_SUCCESS;
+ u8 complete_event_flg;
+ u8 complete_cqe_flg;
+ u16 qid;
+ int rc;
+ u8 i;
+
+ req = &mbx->req_virt->update_rxq;
+ complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
+ complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
+
+ for (i = 0; i < req->num_rxqs; i++) {
+ qid = req->rx_qid + i;
+
+ if (!vf->vf_queues[qid].rxq_active) {
+ DP_NOTICE(p_hwfn, "VF rx_qid = %d isn`t active!\n",
+ qid);
+ status = PFVF_STATUS_FAILURE;
+ break;
+ }
+
+ rc = qed_sp_eth_rx_queues_update(p_hwfn,
+ vf->vf_queues[qid].fw_rx_qid,
+ 1,
+ complete_cqe_flg,
+ complete_event_flg,
+ QED_SPQ_MODE_EBLOCK, NULL);
+
+ if (rc) {
+ status = PFVF_STATUS_FAILURE;
+ break;
+ }
+ }
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
+ length, status);
+}
+
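+/* Walk the TLV chain in a VF request buffer and return the first TLV of the
+ * requested type; returns NULL if the type is absent or the chain is
+ * malformed (zero-length TLV or buffer overrun).
+ */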
+void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
+ void *p_tlvs_list, u16 req_type)
+{
+ struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
+ int len = 0;
+
+ do {
+ if (!p_tlv->length) {
+ DP_NOTICE(p_hwfn, "Zero length TLV found\n");
+ return NULL;
+ }
+
+ if (p_tlv->type == req_type) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Extended tlv type %d, length %d found\n",
+ p_tlv->type, p_tlv->length);
+ return p_tlv;
+ }
+
+ len += p_tlv->length;
+ p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
+
+ if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
+ DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n");
+ return NULL;
+ }
+ } while (p_tlv->type != CHANNEL_TLV_LIST_END);
+
+ return NULL;
+}
+
+static void
+qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_activate_tlv *p_act_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+
+ p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_act_tlv)
+ return;
+
+ p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
+ p_data->vport_active_rx_flg = p_act_tlv->active_rx;
+ p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
+ p_data->vport_active_tx_flg = p_act_tlv->active_tx;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
+}
+
+static void
+qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_vf_info *p_vf,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+
+ p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_vlan_tlv)
+ return;
+
+ p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
+
+ /* Ignore the VF request if we're forcing a vlan */
+ if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
+ p_data->update_inner_vlan_removal_flg = 1;
+ p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
+ }
+
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
+}
+
+static void
+qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+
+ p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+ tlv);
+ if (!p_tx_switch_tlv)
+ return;
+
+ p_data->update_tx_switching_flg = 1;
+ p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
+}
+
+static void
+qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
+
+ p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_mcast_tlv)
+ return;
+
+ p_data->update_approx_mcast_flg = 1;
+ memcpy(p_data->bins, p_mcast_tlv->bins,
+ sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
+}
+
+static void
+qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
+ struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+
+ p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_accept_tlv)
+ return;
+
+ p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
+ p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
+ p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
+ p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
+}
+
+static void
+qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+
+ p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+ tlv);
+ if (!p_accept_any_vlan)
+ return;
+
+ p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
+ p_data->update_accept_any_vlan_flg =
+ p_accept_any_vlan->update_accept_any_vlan_flg;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
+}
+
+static void
+qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_rss_params *p_rss,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
+ u16 i, q_idx, max_q_idx;
+ u16 table_size;
+
+ p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_rss_tlv) {
+ p_data->rss_params = NULL;
+ return;
+ }
+
+ memset(p_rss, 0, sizeof(struct qed_rss_params));
+
+ p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_CONFIG_FLAG);
+ p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_CAPS_FLAG);
+ p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_IND_TABLE_FLAG);
+ p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_KEY_FLAG);
+
+ p_rss->rss_enable = p_rss_tlv->rss_enable;
+ p_rss->rss_eng_id = vf->relative_vf_id + 1;
+ p_rss->rss_caps = p_rss_tlv->rss_caps;
+ p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
+ memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
+ sizeof(p_rss->rss_ind_table));
+ memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
+
+ table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
+ (1 << p_rss_tlv->rss_table_size_log));
+
+ max_q_idx = ARRAY_SIZE(vf->vf_queues);
+
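+ /* Translate the VF-relative queue indices in the indirection table
+ * into absolute FW Rx queue ids; invalid or inactive entries fall
+ * back to the VF's first Rx queue.
+ */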
+ for (i = 0; i < table_size; i++) {
+ u16 index = vf->vf_queues[0].fw_rx_qid;
+
+ q_idx = p_rss->rss_ind_table[i];
+ if (q_idx >= max_q_idx)
+ DP_NOTICE(p_hwfn,
+ "rss_ind_table[%d] = %d, rxq is out of range\n",
+ i, q_idx);
+ else if (!vf->vf_queues[q_idx].rxq_active)
+ DP_NOTICE(p_hwfn,
+ "rss_ind_table[%d] = %d, rxq is not active\n",
+ i, q_idx);
+ else
+ index = vf->vf_queues[q_idx].fw_rx_qid;
+ p_rss->rss_ind_table[i] = index;
+ }
+
+ p_data->rss_params = p_rss;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
+}
+
+static void
+qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_sge_tpa_params *p_sge_tpa,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+
+ p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+
+ if (!p_sge_tpa_tlv) {
+ p_data->sge_tpa_params = NULL;
+ return;
+ }
+
+ memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
+
+ p_sge_tpa->update_tpa_en_flg =
+ !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
+ p_sge_tpa->update_tpa_param_flg =
+ !!(p_sge_tpa_tlv->update_sge_tpa_flags &
+ VFPF_UPDATE_TPA_PARAM_FLAG);
+
+ p_sge_tpa->tpa_ipv4_en_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
+ p_sge_tpa->tpa_ipv6_en_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
+ p_sge_tpa->tpa_pkt_split_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
+ p_sge_tpa->tpa_hdr_data_split_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
+ p_sge_tpa->tpa_gro_consistent_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
+
+ p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
+ p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
+ p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
+ p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
+ p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
+
+ p_data->sge_tpa_params = p_sge_tpa;
+
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
+}
+
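+/* Handle a VPORT_UPDATE request: collect every extended TLV the VF supplied
+ * into a single vport-update ramrod, and report back which TLVs were
+ * processed via the per-TLV response.
+ */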
+static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_sp_vport_update_params params;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct qed_sge_tpa_params sge_tpa_params;
+ struct qed_rss_params rss_params;
+ u8 status = PFVF_STATUS_SUCCESS;
+ u16 tlvs_mask = 0;
+ u16 length;
+ int rc;
+
+ memset(&params, 0, sizeof(params));
+ params.opaque_fid = vf->opaque_fid;
+ params.vport_id = vf->vport_id;
+ params.rss_params = NULL;
+
+ /* Search for extended tlvs list and update values
+ * from VF in struct qed_sp_vport_update_params.
+ */
+ qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
+ qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
+ mbx, &tlvs_mask);
+ qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
+ &sge_tpa_params, mbx, &tlvs_mask);
+
+ /* If the VF supplied no extended TLVs at all, log a message and
+ * report PFVF_STATUS_NOT_SUPPORTED back in the response.
+ */
+ if (!tlvs_mask) {
+ DP_NOTICE(p_hwfn,
+ "No feature tlvs found for vport update\n");
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+out:
+ length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
+ tlvs_mask, tlvs_mask);
+ qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
+}
+
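+/* Track the VF's VLAN filters in the PF's shadow configuration so they can
+ * be replayed once a forced VLAN (PVID) is later removed.
+ */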
+static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_filter_ucast *p_params)
+{
+ int i;
+
+ if (p_params->type == QED_FILTER_MAC)
+ return 0;
+
+ /* First remove entries and then add new ones */
+ if (p_params->opcode == QED_FILTER_REMOVE) {
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+ if (p_vf->shadow_config.vlans[i].used &&
+ p_vf->shadow_config.vlans[i].vid ==
+ p_params->vlan) {
+ p_vf->shadow_config.vlans[i].used = false;
+ break;
+ }
+ if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF [%d] - Tries to remove a non-existing vlan\n",
+ p_vf->relative_vf_id);
+ return -EINVAL;
+ }
+ } else if (p_params->opcode == QED_FILTER_REPLACE ||
+ p_params->opcode == QED_FILTER_FLUSH) {
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+ p_vf->shadow_config.vlans[i].used = false;
+ }
+
+ /* In forced mode, we're willing to remove entries - but we don't add
+ * new ones.
+ */
+ if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
+ return 0;
+
+ if (p_params->opcode == QED_FILTER_ADD ||
+ p_params->opcode == QED_FILTER_REPLACE) {
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+ if (p_vf->shadow_config.vlans[i].used)
+ continue;
+
+ p_vf->shadow_config.vlans[i].used = true;
+ p_vf->shadow_config.vlans[i].vid = p_params->vlan;
+ break;
+ }
+
+ if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF [%d] - Tries to configure more than %d vlan filters\n",
+ p_vf->relative_vf_id,
+ QED_ETH_VF_NUM_VLAN_FILTERS + 1);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
+ int vfid, struct qed_filter_ucast *params)
+{
+ struct qed_public_vf_info *vf;
+
+ vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
+ if (!vf)
+ return -EINVAL;
+
+ /* No real decision to make; Store the configured MAC */
+ if (params->type == QED_FILTER_MAC ||
+ params->type == QED_FILTER_MAC_VLAN)
+ ether_addr_copy(vf->mac, params->mac);
+
+ return 0;
+}
+
+static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_ucast_filter_tlv *req;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct qed_filter_ucast params;
+ int rc;
+
+ /* Prepare the unicast filter params */
+ memset(&params, 0, sizeof(struct qed_filter_ucast));
+ req = &mbx->req_virt->ucast_filter;
+ params.opcode = (enum qed_filter_opcode)req->opcode;
+ params.type = (enum qed_filter_ucast_type)req->type;
+
+ params.is_rx_filter = 1;
+ params.is_tx_filter = 1;
+ params.vport_to_remove_from = vf->vport_id;
+ params.vport_to_add_to = vf->vport_id;
+ memcpy(params.mac, req->mac, ETH_ALEN);
+ params.vlan = req->vlan;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
+ vf->abs_vf_id, params.opcode, params.type,
+ params.is_rx_filter ? "RX" : "",
+ params.is_tx_filter ? "TX" : "",
+ params.vport_to_add_to,
+ params.mac[0], params.mac[1],
+ params.mac[2], params.mac[3],
+ params.mac[4], params.mac[5], params.vlan);
+
+ if (!vf->vport_instance) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ /* Update shadow copy of the VF configuration */
+ if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ /* Determine if the unicast filtering is acceptable to the PF */
+ if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
+ (params.type == QED_FILTER_VLAN ||
+ params.type == QED_FILTER_MAC_VLAN)) {
+ /* Once VLAN is forced or PVID is set, do not allow
+ * to add/replace any further VLANs.
+ */
+ if (params.opcode == QED_FILTER_ADD ||
+ params.opcode == QED_FILTER_REPLACE)
+ status = PFVF_STATUS_FORCED;
+ goto out;
+ }
+
+ if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
+ (params.type == QED_FILTER_MAC ||
+ params.type == QED_FILTER_MAC_VLAN)) {
+ if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
+ (params.opcode != QED_FILTER_ADD &&
+ params.opcode != QED_FILTER_REPLACE))
+ status = PFVF_STATUS_FORCED;
+ goto out;
+ }
+
+ rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
+ if (rc) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
+ QED_SPQ_MODE_CB, NULL);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+out:
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ int i;
+
+ /* Reset the SBs */
+ for (i = 0; i < vf->num_sbs; i++)
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ vf->igu_sbs[i],
+ vf->opaque_fid, false);
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_SUCCESS);
+}
+
+static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+
+ /* Disable Interrupts for VF */
+ qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+ /* Reset Permission table */
+ qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
+ length, status);
+}
+
+static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+
+ qed_iov_vf_cleanup(p_hwfn, p_vf);
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
+ length, PFVF_STATUS_SUCCESS);
+}
+
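+/* While pretending to be the VF, poll the DORQ usage counter for up to a
+ * second to let any outstanding doorbells drain before final cleanup.
+ */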
+static int
+qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+ int cnt;
+ u32 val;
+
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
+
+ for (cnt = 0; cnt < 50; cnt++) {
+ val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
+ if (!val)
+ break;
+ msleep(20);
+ }
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+ if (cnt == 50) {
+ DP_ERR(p_hwfn,
+ "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
+ p_vf->abs_vf_id, val);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
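+/* Poll the PBF block until the consumers of every VOQ have advanced past
+ * the producers sampled at entry, i.e. all packets queued on behalf of the
+ * VF have been transmitted.
+ */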
+static int
+qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+ u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+ int i, cnt;
+
+ /* Read initial consumers & producers */
+ for (i = 0; i < MAX_NUM_VOQS; i++) {
+ u32 prod;
+
+ cons[i] = qed_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+ i * 0x40);
+ prod = qed_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
+ i * 0x40);
+ distance[i] = prod - cons[i];
+ }
+
+ /* Wait for consumers to pass the producers */
+ i = 0;
+ for (cnt = 0; cnt < 50; cnt++) {
+ for (; i < MAX_NUM_VOQS; i++) {
+ u32 tmp;
+
+ tmp = qed_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+ i * 0x40);
+ if (distance[i] > tmp - cons[i])
+ break;
+ }
+
+ if (i == MAX_NUM_VOQS)
+ break;
+
+ msleep(20);
+ }
+
+ if (cnt == 50) {
+ DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
+ p_vf->abs_vf_id, i);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+ int rc;
+
+ rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ return rc;
+
+ rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int
+qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 rel_vf_id, u32 *ack_vfs)
+{
+ struct qed_vf_info *p_vf;
+ int rc = 0;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!p_vf)
+ return 0;
+
+ if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
+ (1ULL << (rel_vf_id % 64))) {
+ u16 vfid = p_vf->abs_vf_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] - Handling FLR\n", vfid);
+
+ qed_iov_vf_cleanup(p_hwfn, p_vf);
+
+ /* If VF isn't active, no need for anything but SW */
+ if (!p_vf->b_init)
+ goto cleanup;
+
+ rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ goto cleanup;
+
+ rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
+ if (rc) {
+ DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
+ return rc;
+ }
+
+ /* VF_STOPPED has to be set only after final cleanup
+ * but prior to re-enabling the VF.
+ */
+ p_vf->state = VF_STOPPED;
+
+ rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
+ if (rc) {
+ DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
+ vfid);
+ return rc;
+ }
+cleanup:
+ /* Mark VF for ack and clean pending state */
+ if (p_vf->state == VF_RESET)
+ p_vf->state = VF_STOPPED;
+ ack_vfs[vfid / 32] |= (1 << (vfid % 32));
+ p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
+ ~(1ULL << (rel_vf_id % 64));
+ p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
+ ~(1ULL << (rel_vf_id % 64));
+ }
+
+ return rc;
+}
+
+int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 ack_vfs[VF_MAX_STATIC / 32];
+ int rc = 0;
+ u16 i;
+
+ memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+
+ /* Since BRB <-> PRS interface can't be tested as part of the flr
+ * polling due to HW limitations, simply sleep a bit. And since
+ * there's no need to wait per-vf, do it before looping.
+ */
+ msleep(100);
+
+ for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
+ qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
+
+ rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
+ return rc;
+}
+
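+/* Translate the MFW's disabled-VFs bitmap into per-VF pending-FLR bits and
+ * mark the affected VFs as VF_RESET; returns non-zero if any VF was marked.
+ */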
+int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
+{
+ u16 i, found = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "[%08x,...,%08x]: %08x\n",
+ i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
+
+ if (!p_hwfn->cdev->p_iov_info) {
+ DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
+ return 0;
+ }
+
+ /* Mark VFs */
+ for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
+ struct qed_vf_info *p_vf;
+ u8 vfid;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
+ if (!p_vf)
+ continue;
+
+ vfid = p_vf->abs_vf_id;
+ if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
+ u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
+ u16 rel_vf_id = p_vf->relative_vf_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] [rel %d] got FLR-ed\n",
+ vfid, rel_vf_id);
+
+ p_vf->state = VF_RESET;
+
+ /* No need to lock here, since pending_flr should only
+ * change here and prior to ACKing the MFW. Since the MFW
+ * will not trigger an additional attention for VF FLR
+ * until the ACK, we're safe.
+ */
+ p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
+ found = 1;
+ }
+ }
+
+ return found;
+}
+
+static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
+ u16 vfid,
+ struct qed_mcp_link_params *p_params,
+ struct qed_mcp_link_state *p_link,
+ struct qed_mcp_link_capabilities *p_caps)
+{
+ struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
+ vfid,
+ false);
+ struct qed_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+
+ if (p_params)
+ __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
+ if (p_link)
+ __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
+ if (p_caps)
+ __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+}
+
+static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, int vfid)
+{
+ struct qed_iov_vf_mbx *mbx;
+ struct qed_vf_info *p_vf;
+ int i;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!p_vf)
+ return;
+
+ mbx = &p_vf->vf_mbx;
+
+ /* qed_iov_process_mbx_request */
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);
+
+ mbx->first_tlv = mbx->req_virt->first_tlv;
+
+ /* check if tlv type is known */
+ if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+ switch (mbx->first_tlv.tl.type) {
+ case CHANNEL_TLV_ACQUIRE:
+ qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_START:
+ qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_TEARDOWN:
+ qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_START_RXQ:
+ qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_START_TXQ:
+ qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_STOP_RXQS:
+ qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_STOP_TXQS:
+ qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UPDATE_RXQ:
+ qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_UPDATE:
+ qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UCAST_FILTER:
+ qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_CLOSE:
+ qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_INT_CLEANUP:
+ qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_RELEASE:
+ qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
+ break;
+ }
+ } else {
+ /* unknown TLV - this may belong to a VF driver from the future
+ * - a version written after this PF driver was written, which
+ * supports features unknown as of yet. Too bad since we don't
+ * support them. Or this may be because someone wrote a crappy
+ * VF driver and is sending garbage over the channel.
+ */
+ DP_ERR(p_hwfn,
+ "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
+ mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
+
+ for (i = 0; i < 20; i++) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "%x ",
+ mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
+ }
+ }
+}
+
+void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+ u64 add_bit = 1ULL << (vfid % 64);
+
+ p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
+}
+
+static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
+ u64 *events)
+{
+ u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+
+ memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+ memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+}
+
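+/* EQ-time handler for a VF->PF channel message: record the DMA address of
+ * the VF's request and defer the actual processing to the IOV workqueue.
+ */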
+static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
+ u16 abs_vfid, struct regpair *vf_msg)
+{
+ u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+ struct qed_vf_info *p_vf;
+
+ if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
+ abs_vfid);
+ return 0;
+ }
+ p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
+
+ /* List the physical address of the request so that handler
+ * could later on copy the message from it.
+ */
+ p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
+
+ /* Mark the event and schedule the workqueue */
+ qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
+ qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
+
+ return 0;
+}
+
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode, __le16 echo, union event_ring_data *data)
+{
+ switch (opcode) {
+ case COMMON_EVENT_VF_PF_CHANNEL:
+ return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
+ &data->vf_pf_channel.msg_addr);
+ default:
+ DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
+ opcode);
+ return -EINVAL;
+ }
+}
+
+u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+ u16 i;
+
+ if (!p_iov)
+ goto out;
+
+ for (i = rel_vf_id; i < p_iov->total_vfs; i++)
+ if (qed_iov_is_valid_vfid(p_hwfn, i, true))
+ return i;
+
+out:
+ return MAX_NUM_VFS;
+}
+
+static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
+ int vfid)
+{
+ struct qed_dmae_params params;
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf_info)
+ return -EINVAL;
+
+ memset(&params, 0, sizeof(struct qed_dmae_params));
+ params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
+ params.src_vfid = vf_info->abs_vf_id;
+
+ if (qed_dmae_host2host(p_hwfn, ptt,
+ vf_info->vf_mbx.pending_req,
+ vf_info->vf_mbx.req_phys,
+ sizeof(union vfpf_tlvs) / 4, &params)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Failed to copy message from VF 0x%02x\n", vfid);
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
+ u8 *mac, int vfid)
+{
+ struct qed_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ feature = 1 << MAC_ADDR_FORCED;
+ memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ /* Forced MAC will disable MAC_ADDR */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~(1 << VFPF_BULLETIN_MAC_ADDR);
+
+ qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
+ u16 pvid, int vfid)
+{
+ struct qed_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ feature = 1 << VLAN_ADDR_FORCED;
+ vf_info->bulletin.p_virt->pvid = pvid;
+ if (pvid)
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ else
+ vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
+
+ qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_vf_info *p_vf_info;
+
+ p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!p_vf_info)
+ return false;
+
+ return !!p_vf_info->vport_instance;
+}
+
+bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_vf_info *p_vf_info;
+
+ p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!p_vf_info)
+ return true;
+
+ return p_vf_info->state == VF_STOPPED;
+}
+
+static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf_info)
+ return false;
+
+ return vf_info->spoof_chk;
+}
+
+int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
+{
+ struct qed_vf_info *vf;
+ int rc = -EINVAL;
+
+ if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn,
+ "SR-IOV sanity check failed, can't set spoofchk\n");
+ goto out;
+ }
+
+ vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf)
+ goto out;
+
+ if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
+ /* After VF VPORT start PF will configure spoof check */
+ vf->req_spoofchk_val = val;
+ rc = 0;
+ goto out;
+ }
+
+ rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
+
+out:
+ return rc;
+}
+
+static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return NULL;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+ return NULL;
+
+ return p_vf->bulletin.p_virt->mac;
+}
+
+u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return 0;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+ return 0;
+
+ return p_vf->bulletin.p_virt->pvid;
+}
+
+static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, int vfid, int val)
+{
+ struct qed_vf_info *vf;
+ u8 abs_vp_id = 0;
+ int rc;
+
+ vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf)
+ return -EINVAL;
+
+ rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
+ if (rc)
+ return rc;
+
+ return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+}
+
+int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
+{
+ struct qed_vf_info *vf;
+ u8 vport_id;
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn,
+ "SR-IOV sanity check failed, can't set min rate\n");
+ return -EINVAL;
+ }
+ }
+
+ vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
+ vport_id = vf->vport_id;
+
+ return qed_configure_vport_wfq(cdev, vport_id, rate);
+}
+
+static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_wfq_data *vf_vp_wfq;
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf_info)
+ return 0;
+
+ vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
+
+ if (vf_vp_wfq->configured)
+ return vf_vp_wfq->min_speed;
+ else
+ return 0;
+}
+
+/**
+ * qed_schedule_iov - schedules IOV task for VF and PF
+ * @hwfn: hardware function pointer
+ * @flag: IOV flag for VF/PF
+ */
+void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
+{
+ smp_mb__before_atomic();
+ set_bit(flag, &hwfn->iov_task_flags);
+ smp_mb__after_atomic();
+ DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
+ queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
+}
+
+void qed_vf_start_iov_wq(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i)
+ queue_delayed_work(cdev->hwfns[i].iov_wq,
+ &cdev->hwfns[i].iov_task, 0);
+}
+
+int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
+{
+ int i, j;
+
+ for_each_hwfn(cdev, i)
+ if (cdev->hwfns[i].iov_wq)
+ flush_workqueue(cdev->hwfns[i].iov_wq);
+
+ /* Mark VFs for disablement */
+ qed_iov_set_vfs_to_disable(cdev, true);
+
+ if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
+ pci_disable_sriov(cdev->pdev);
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+ /* Failure to acquire the ptt in 100g creates an odd error
+ * where the first engine has already released IOV.
+ */
+ if (!ptt) {
+ DP_ERR(hwfn, "Failed to acquire ptt\n");
+ return -EBUSY;
+ }
+
+ /* Clean WFQ db and configure equal weight for all vports */
+ qed_clean_wfq_db(hwfn, ptt);
+
+ qed_for_each_vf(hwfn, j) {
+ int k;
+
+ if (!qed_iov_is_valid_vfid(hwfn, j, true))
+ continue;
+
+ /* Wait until VF is disabled before releasing */
+ for (k = 0; k < 100; k++) {
+ if (!qed_iov_is_vf_stopped(hwfn, j))
+ msleep(20);
+ else
+ break;
+ }
+
+ if (k < 100)
+ qed_iov_release_hw_for_vf(&cdev->hwfns[i],
+ ptt, j);
+ else
+ DP_ERR(hwfn,
+ "Timeout waiting for VF's FLR to end\n");
+ }
+
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ qed_iov_set_vfs_to_disable(cdev, false);
+
+ return 0;
+}
+
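+/* Prepare HW for the requested number of VFs on each hwfn, splitting the
+ * available status blocks between them, and then enable the PCIe SR-IOV
+ * capability.
+ */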
+static int qed_sriov_enable(struct qed_dev *cdev, int num)
+{
+ struct qed_sb_cnt_info sb_cnt_info;
+ int i, j, rc;
+
+ if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
+ DP_NOTICE(cdev, "Can start at most %d VFs\n",
+ RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
+ return -EINVAL;
+ }
+
+ /* Initialize HW for VF access */
+ for_each_hwfn(cdev, j) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[j];
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+ int num_sbs = 0, limit = 16;
+
+ if (!ptt) {
+ DP_ERR(hwfn, "Failed to acquire ptt\n");
+ rc = -EBUSY;
+ goto err;
+ }
+
+ if (IS_MF_DEFAULT(hwfn))
+ limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine;
+
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(hwfn, &sb_cnt_info);
+ num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);
+
+ for (i = 0; i < num; i++) {
+ if (!qed_iov_is_valid_vfid(hwfn, i, false))
+ continue;
+
+ rc = qed_iov_init_hw_for_vf(hwfn,
+ ptt, i, num_sbs / num);
+ if (rc) {
+ DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
+ qed_ptt_release(hwfn, ptt);
+ goto err;
+ }
+ }
+
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ /* Enable SRIOV PCIe functions */
+ rc = pci_enable_sriov(cdev->pdev, num);
+ if (rc) {
+ DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
+ goto err;
+ }
+
+ return num;
+
+err:
+ qed_sriov_disable(cdev, false);
+ return rc;
+}
+
+static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
+{
+ if (!IS_QED_SRIOV(cdev)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (num_vfs_param)
+ return qed_sriov_enable(cdev, num_vfs_param);
+ else
+ return qed_sriov_disable(cdev, true);
+}
+
+static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
+{
+ int i;
+
+ if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set a VF MAC; Sriov is not enabled\n");
+ return -EINVAL;
+ }
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
+ return -EINVAL;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+ if (!vf_info)
+ continue;
+
+ /* Set the forced MAC, and schedule the IOV task */
+ ether_addr_copy(vf_info->forced_mac, mac);
+ qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
+ }
+
+ return 0;
+}
+
+static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
+{
+ int i;
+
+ if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set a VF MAC; Sriov is not enabled\n");
+ return -EINVAL;
+ }
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
+ return -EINVAL;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+ if (!vf_info)
+ continue;
+
+ /* Set the forced vlan, and schedule the IOV task */
+ vf_info->forced_vlan = vid;
+ qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
+ }
+
+ return 0;
+}
+
+static int qed_get_vf_config(struct qed_dev *cdev,
+ int vf_id, struct ifla_vf_info *ivi)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_public_vf_info *vf_info;
+ struct qed_mcp_link_state link;
+ u32 tx_rate;
+
+ /* Sanitize request */
+ if (IS_VF(cdev))
+ return -EINVAL;
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "VF index [%d] isn't active\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
+
+ qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+
+ /* Fill information about VF */
+ ivi->vf = vf_id;
+
+ if (is_valid_ether_addr(vf_info->forced_mac))
+ ether_addr_copy(ivi->mac, vf_info->forced_mac);
+ else
+ ether_addr_copy(ivi->mac, vf_info->mac);
+
+ ivi->vlan = vf_info->forced_vlan;
+ ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
+ ivi->linkstate = vf_info->link_state;
+ tx_rate = vf_info->tx_rate;
+ ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
+ ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
+
+ return 0;
+}
+
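+/* Reflect the PF's current link configuration into every VF bulletin,
+ * honoring each VF's administratively set link state and Tx rate limit.
+ */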
+void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
+{
+ struct qed_mcp_link_capabilities caps;
+ struct qed_mcp_link_params params;
+ struct qed_mcp_link_state link;
+ int i;
+
+ if (!hwfn->pf_iov_info)
+ return;
+
+ /* Update bulletin of all future possible VFs with link configuration */
+ for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
+ if (!vf_info)
+ continue;
+
+ memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
+ memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
+ memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
+ sizeof(caps));
+
+ /* Modify link according to the VF's configured link state */
+ switch (vf_info->link_state) {
+ case IFLA_VF_LINK_STATE_DISABLE:
+ link.link_up = false;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ link.link_up = true;
+ /* Set speed according to the maximum supported by HW,
+ * i.e. 40G for regular devices and 100G for CMT-mode
+ * devices.
+ */
+ link.speed = (hwfn->cdev->num_hwfns > 1) ?
+ 100000 : 40000;
+ default:
+ /* In auto mode pass PF link image to VF */
+ break;
+ }
+
+ if (link.link_up && vf_info->tx_rate) {
+ struct qed_ptt *ptt;
+ int rate;
+
+ rate = min_t(int, vf_info->tx_rate, link.speed);
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, "Failed to acquire PTT\n");
+ return;
+ }
+
+ if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
+ vf_info->tx_rate = rate;
+ link.speed = rate;
+ }
+
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ qed_iov_set_link(hwfn, i, &params, &link, &caps);
+ }
+
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+}
+
+static int qed_set_vf_link_state(struct qed_dev *cdev,
+ int vf_id, int link_state)
+{
+ int i;
+
+ /* Sanitize request */
+ if (IS_VF(cdev))
+ return -EINVAL;
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "VF index [%d] isn't active\n", vf_id);
+ return -EINVAL;
+ }
+
+ /* Handle configuration of link state */
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf;
+
+ vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
+ if (!vf)
+ continue;
+
+ if (vf->link_state == link_state)
+ continue;
+
+ vf->link_state = link_state;
+ qed_inform_vf_link_state(&cdev->hwfns[i]);
+ }
+
+ return 0;
+}
+
+static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
+{
+ int i, rc = -EINVAL;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf;
+
+ if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn,
+ "SR-IOV sanity check failed, can't set tx rate\n");
+ return -EINVAL;
+ }
+
+ vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
+
+ vf->tx_rate = rate;
+
+ qed_inform_vf_link_state(p_hwfn);
+ }
+
+ return 0;
+}
+
+static int qed_set_vf_rate(struct qed_dev *cdev,
+ int vfid, u32 min_rate, u32 max_rate)
+{
+ int rc_min = 0, rc_max = 0;
+
+ if (max_rate)
+ rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
+
+ if (min_rate)
+ rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
+
+ if (rc_max | rc_min)
+ return -EINVAL;
+
+ return 0;
+}
+
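+/* IOV workqueue handler: snapshot and clear the pending-events bitmap, then
+ * copy and process the mailbox request of every VF that has one pending.
+ */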
+static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
+{
+ u64 events[QED_VF_ARRAY_LENGTH];
+ struct qed_ptt *ptt;
+ int i;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Can't acquire PTT; re-scheduling\n");
+ qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
+ return;
+ }
+
+ qed_iov_pf_get_and_clear_pending_events(hwfn, events);
+
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
+ events[0], events[1], events[2]);
+
+ qed_for_each_vf(hwfn, i) {
+ /* Skip VFs with no pending messages */
+ if (!(events[i / 64] & (1ULL << (i % 64))))
+ continue;
+
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
+ i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+ /* Copy VF's message to PF's request buffer for that VF */
+ if (qed_iov_copy_vf_msg(hwfn, ptt, i))
+ continue;
+
+ qed_iov_process_mbx_req(hwfn, ptt, i);
+ }
+
+ qed_ptt_release(hwfn, ptt);
+}
+
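+/* Propagate PF-requested forced MAC/VLAN values into each VF's bulletin
+ * board and schedule a bulletin post whenever something changed.
+ */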
+static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
+{
+ int i;
+
+ qed_for_each_vf(hwfn, i) {
+ struct qed_public_vf_info *info;
+ bool update = false;
+ u8 *mac;
+
+ info = qed_iov_get_public_vf_info(hwfn, i, true);
+ if (!info)
+ continue;
+
+ /* Update data on bulletin board */
+ mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
+ if (is_valid_ether_addr(info->forced_mac) &&
+ (!mac || !ether_addr_equal(mac, info->forced_mac))) {
+ DP_VERBOSE(hwfn,
+ QED_MSG_IOV,
+ "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
+ i,
+ hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+ /* Update bulletin board with forced MAC */
+ qed_iov_bulletin_set_forced_mac(hwfn,
+ info->forced_mac, i);
+ update = true;
+ }
+
+ if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
+ info->forced_vlan) {
+ DP_VERBOSE(hwfn,
+ QED_MSG_IOV,
+ "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
+ info->forced_vlan,
+ i,
+ hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+ qed_iov_bulletin_set_forced_vlan(hwfn,
+ info->forced_vlan, i);
+ update = true;
+ }
+
+ if (update)
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ }
+}
+
+static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
+{
+ struct qed_ptt *ptt;
+ int i;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ return;
+ }
+
+ qed_for_each_vf(hwfn, i)
+ qed_iov_post_vf_bulletin(hwfn, i, ptt);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+void qed_iov_pf_task(struct work_struct *work)
+{
+ struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+ iov_task.work);
+ int rc;
+
+ if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
+ return;
+
+ if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+ if (!ptt) {
+ qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
+ return;
+ }
+
+ rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
+ if (rc)
+ qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
+
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
+ qed_handle_vf_msg(hwfn);
+
+ if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
+ &hwfn->iov_task_flags))
+ qed_handle_pf_set_vf_unicast(hwfn);
+
+ if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+ &hwfn->iov_task_flags))
+ qed_handle_bulletin_post(hwfn);
+}
+
+void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ if (!cdev->hwfns[i].iov_wq)
+ continue;
+
+ if (schedule_first) {
+ qed_schedule_iov(&cdev->hwfns[i],
+ QED_IOV_WQ_STOP_WQ_FLAG);
+ cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
+ }
+
+ flush_workqueue(cdev->hwfns[i].iov_wq);
+ destroy_workqueue(cdev->hwfns[i].iov_wq);
+ }
+}
+
+int qed_iov_wq_start(struct qed_dev *cdev)
+{
+ char name[NAME_SIZE];
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ /* PFs need a dedicated workqueue only if they support IOV.
+ * VFs always require one.
+ */
+ if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
+ continue;
+
+ snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
+ cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
+
+ p_hwfn->iov_wq = create_singlethread_workqueue(name);
+ if (!p_hwfn->iov_wq) {
+ DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
+ return -ENOMEM;
+ }
+
+ if (IS_PF(cdev))
+ INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
+ else
+ INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
+ }
+
+ return 0;
+}
+
+const struct qed_iov_hv_ops qed_iov_ops_pass = {
+ .configure = &qed_sriov_configure,
+ .set_mac = &qed_sriov_pf_set_mac,
+ .set_vlan = &qed_sriov_pf_set_vlan,
+ .get_config = &qed_get_vf_config,
+ .set_link_state = &qed_set_vf_link_state,
+ .set_spoof = &qed_spoof_configure,
+ .set_rate = &qed_set_vf_rate,
+};
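/* Usage sketch, hedged: the callbacks in qed_iov_ops_pass are what the L2
 * driver (qede) reaches through the qed_iov_hv_ops it obtains from qed. An
 * .ndo_set_vf_mac implementation on top of it could look roughly like the
 * snippet below; the 'edev->ops->iov' / 'edev->cdev' fields are assumptions
 * about the qede side and are illustrative only.
 */
static int example_ndo_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (!is_valid_ether_addr(mac))
		return -EINVAL;

	/* Funnels into qed_sriov_pf_set_mac() via the .set_mac callback */
	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}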
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
new file mode 100644
index 0000000..c8667c6
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -0,0 +1,386 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_SRIOV_H
+#define _QED_SRIOV_H
+#include <linux/types.h>
+#include "qed_vf.h"
+#define QED_VF_ARRAY_LENGTH (3)
+
+#define IS_VF(cdev) ((cdev)->b_is_vf)
+#define IS_PF(cdev) (!((cdev)->b_is_vf))
+#ifdef CONFIG_QED_SRIOV
+#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
+#else
+#define IS_PF_SRIOV(p_hwfn) (0)
+#endif
+#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
+
+#define QED_MAX_VF_CHAINS_PER_PF 16
+#define QED_ETH_VF_NUM_VLAN_FILTERS 2
+
+#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \
+ (MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)
+
+enum qed_iov_vport_update_flag {
+ QED_IOV_VP_UPDATE_ACTIVATE,
+ QED_IOV_VP_UPDATE_VLAN_STRIP,
+ QED_IOV_VP_UPDATE_TX_SWITCH,
+ QED_IOV_VP_UPDATE_MCAST,
+ QED_IOV_VP_UPDATE_ACCEPT_PARAM,
+ QED_IOV_VP_UPDATE_RSS,
+ QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
+ QED_IOV_VP_UPDATE_SGE_TPA,
+ QED_IOV_VP_UPDATE_MAX,
+};
+
+struct qed_public_vf_info {
+ /* These copies will later be reflected in the bulletin board,
+ * but this copy should be newer.
+ */
+ u8 forced_mac[ETH_ALEN];
+ u16 forced_vlan;
+ u8 mac[ETH_ALEN];
+
+ /* IFLA_VF_LINK_STATE_<X> */
+ int link_state;
+
+ /* Currently configured Tx rate in MB/sec. 0 if unconfigured */
+ int tx_rate;
+};
+
+/* This struct is part of qed_dev and contains data relevant to all hwfns;
+ * Initialized only if SR-IOV capability is exposed in PCIe config space.
+ */
+struct qed_hw_sriov_info {
+ int pos; /* capability position */
+ int nres; /* number of resources */
+ u32 cap; /* SR-IOV Capabilities */
+ u16 ctrl; /* SR-IOV Control */
+ u16 total_vfs; /* total VFs associated with the PF */
+ u16 num_vfs; /* number of vfs that have been started */
+ u16 initial_vfs; /* initial VFs associated with the PF */
+ u16 nr_virtfn; /* number of VFs available */
+ u16 offset; /* first VF Routing ID offset */
+ u16 stride; /* following VF stride */
+ u16 vf_device_id; /* VF device id */
+ u32 pgsz; /* page size for BAR alignment */
+ u8 link; /* Function Dependency Link */
+
+ u32 first_vf_in_pf;
+};
+
+/* This mailbox is maintained per VF in its PF and contains all the information
+ * required for sending / receiving a message.
+ */
+struct qed_iov_vf_mbx {
+ union vfpf_tlvs *req_virt;
+ dma_addr_t req_phys;
+ union pfvf_tlvs *reply_virt;
+ dma_addr_t reply_phys;
+
+ /* Address in VF where a pending message is located */
+ dma_addr_t pending_req;
+
+ u8 *offset;
+
+ /* saved VF request header */
+ struct vfpf_first_tlv first_tlv;
+};
+
+struct qed_vf_q_info {
+ u16 fw_rx_qid;
+ u16 fw_tx_qid;
+ u8 fw_cid;
+ u8 rxq_active;
+ u8 txq_active;
+};
+
+enum vf_state {
+	VF_FREE = 0,		/* VF ready to be acquired; holds no resources */
+	VF_ACQUIRED,		/* VF acquired, but not yet initialized */
+ VF_ENABLED, /* VF, Enabled */
+ VF_RESET, /* VF, FLR'd, pending cleanup */
+ VF_STOPPED /* VF, Stopped */
+};
+
+struct qed_vf_vlan_shadow {
+ bool used;
+ u16 vid;
+};
+
+struct qed_vf_shadow_config {
+ /* Shadow copy of all guest vlans */
+ struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];
+
+ u8 inner_vlan_removal;
+};
+
+/* PFs maintain an array of this structure, per VF */
+struct qed_vf_info {
+ struct qed_iov_vf_mbx vf_mbx;
+ enum vf_state state;
+ bool b_init;
+ u8 to_disable;
+
+ struct qed_bulletin bulletin;
+ dma_addr_t vf_bulletin;
+
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u16 mtu;
+
+ u8 vport_id;
+ u8 relative_vf_id;
+ u8 abs_vf_id;
+#define QED_VF_ABS_ID(p_hwfn, p_vf) (QED_PATH_ID(p_hwfn) ? \
+ (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
+ (p_vf)->abs_vf_id)
+
+ u8 vport_instance;
+ u8 num_rxqs;
+ u8 num_txqs;
+
+ u8 num_sbs;
+
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
+ u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
+ u8 num_active_rxqs;
+ struct qed_public_vf_info p_vf_info;
+ bool spoof_chk;
+ bool req_spoofchk_val;
+
+ /* Stores the configuration requested by VF */
+ struct qed_vf_shadow_config shadow_config;
+
+ /* A bitfield using bulletin's valid-map bits, used to indicate
+ * which of the bulletin board features have been configured.
+ */
+ u64 configured_features;
+#define QED_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \
+ (1 << VLAN_ADDR_FORCED))
+};
+
+/* This structure is part of qed_hwfn and used only for PFs that have sriov
+ * capability enabled.
+ */
+struct qed_pf_iov {
+ struct qed_vf_info vfs_array[MAX_NUM_VFS];
+ u64 pending_events[QED_VF_ARRAY_LENGTH];
+ u64 pending_flr[QED_VF_ARRAY_LENGTH];
+
+	/* Message buffers are allocated contiguously and split among the VFs */
+ void *mbx_msg_virt_addr;
+ dma_addr_t mbx_msg_phys_addr;
+ u32 mbx_msg_size;
+ void *mbx_reply_virt_addr;
+ dma_addr_t mbx_reply_phys_addr;
+ u32 mbx_reply_size;
+ void *p_bulletins;
+ dma_addr_t bulletins_phys;
+ u32 bulletins_size;
+};
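/* Illustrative sketch only: the code that carves these buffers lives in the
 * PF setup path outside this hunk. "Split among the VFs" presumably means the
 * single DMA region is divided into equally sized per-VF request mailboxes,
 * roughly as follows (helper name and exact layout are assumptions):
 */
static void example_split_vf_mailboxes(struct qed_pf_iov *p_iov_info)
{
	int idx;

	for (idx = 0; idx < MAX_NUM_VFS; idx++) {
		struct qed_iov_vf_mbx *mbx = &p_iov_info->vfs_array[idx].vf_mbx;
		u32 offset = idx * sizeof(union vfpf_tlvs);

		/* Each VF gets its own slice of the contiguous region */
		mbx->req_virt = (void *)((u8 *)p_iov_info->mbx_msg_virt_addr +
					 offset);
		mbx->req_phys = p_iov_info->mbx_msg_phys_addr + offset;
	}
}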
+
+enum qed_iov_wq_flag {
+ QED_IOV_WQ_MSG_FLAG,
+ QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
+ QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+ QED_IOV_WQ_STOP_WQ_FLAG,
+ QED_IOV_WQ_FLR_FLAG,
+};
+
+#ifdef CONFIG_QED_SRIOV
+/**
+ * @brief - Given a VF index, return index of next [including that] active VF.
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return MAX_NUM_VFS if there are no further active VFs; otherwise, the index.
+ */
+u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief Read sriov related information and allocate related resources;
+ *        reads from configuration space, shmem, etc.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_iov_hw_info(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
+ *
+ * @param p_hwfn
+ * @param p_iov
+ * @param type
+ * @param length
+ *
+ * @return pointer to the newly placed tlv
+ */
+void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
+
+/**
+ * @brief list the types and lengths of the tlvs on the buffer
+ *
+ * @param p_hwfn
+ * @param tlvs_list
+ */
+void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
+
+/**
+ * @brief qed_iov_alloc - allocate sriov related resources
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_iov_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_iov_setup - setup sriov related resources
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_iov_free - free sriov related resources
+ *
+ * @param p_hwfn
+ */
+void qed_iov_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief free sriov related memory that was allocated during hw_prepare
+ *
+ * @param cdev
+ */
+void qed_iov_free_hw_info(struct qed_dev *cdev);
+
+/**
+ * @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe.
+ *
+ * @param p_hwfn
+ * @param opcode
+ * @param echo
+ * @param data
+ */
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode, __le16 echo, union event_ring_data *data);
+
+/**
+ * @brief Mark structs of vfs that have been FLR-ed.
+ *
+ * @param p_hwfn
+ * @param disabled_vfs - bitmask of all VFs on path that were FLRed
+ *
+ * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
+ */
+int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
+
+/**
+ * @brief Search extended TLVs in request/reply buffer.
+ *
+ * @param p_hwfn
+ * @param p_tlvs_list - Pointer to tlvs list
+ * @param req_type - Type of TLV
+ *
+ * @return pointer to tlv type if found, otherwise returns NULL.
+ */
+void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
+ void *p_tlvs_list, u16 req_type);
+
+void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
+int qed_iov_wq_start(struct qed_dev *cdev);
+
+void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
+void qed_vf_start_iov_wq(struct qed_dev *cdev);
+int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
+void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
+#else
+static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ return MAX_NUM_VFS;
+}
+
+static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
+{
+ return 0;
+}
+
+static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
+{
+ return 0;
+}
+
+static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+}
+
+static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
+{
+}
+
+static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
+{
+}
+
+static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo, union event_ring_data *data)
+{
+ return -EINVAL;
+}
+
+static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
+ u32 *disabled_vfs)
+{
+ return 0;
+}
+
+static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
+{
+}
+
+static inline int qed_iov_wq_start(struct qed_dev *cdev)
+{
+ return 0;
+}
+
+static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
+ enum qed_iov_wq_flag flag)
+{
+}
+
+static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
+{
+}
+
+static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
+{
+ return 0;
+}
+
+static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
+{
+}
+#endif
+
+#define qed_for_each_vf(_p_hwfn, _i) \
+ for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
+ _i < MAX_NUM_VFS; \
+ _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
+
+#endif
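/* Usage sketch: qed_for_each_vf() walks only the currently active VFs of a
 * PF, exactly as the workqueue handlers in qed_sriov.c do (see
 * qed_handle_bulletin_post() above). Example:
 */
static void example_walk_active_vfs(struct qed_hwfn *p_hwfn)
{
	int i;

	qed_for_each_vf(p_hwfn, i) {
		/* 'i' is the relative VF index; MAX_NUM_VFS ends the walk */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF %d is active\n", i);
	}
}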
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
new file mode 100644
index 0000000..72e69c0
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -0,0 +1,1102 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/crc32.h>
+#include <linux/etherdevice.h>
+#include "qed.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ void *p_tlv;
+
+ /* This lock is released when we receive PF's response
+ * in qed_send_msg2pf().
+ * So, qed_vf_pf_prep() and qed_send_msg2pf()
+ * must come in sequence.
+ */
+ mutex_lock(&(p_iov->mutex));
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "preparing to send 0x%04x tlv over vf pf channel\n",
+ type);
+
+	/* Reset Request offset */
+ p_iov->offset = (u8 *)p_iov->vf2pf_request;
+
+ /* Clear mailbox - both request and reply */
+ memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
+ memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+
+ /* Init type and length */
+ p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);
+
+ /* Init first tlv header */
+ ((struct vfpf_first_tlv *)p_tlv)->reply_address =
+ (u64)p_iov->pf2vf_reply_phys;
+
+ return p_tlv;
+}
+
+static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
+{
+ union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
+ struct ustorm_trigger_vf_zone trigger;
+ struct ustorm_vf_zone *zone_data;
+ int rc = 0, time = 100;
+
+ zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
+
+ /* output tlvs list */
+ qed_dp_tlv_list(p_hwfn, p_req);
+
+ /* need to add the END TLV to the message size */
+ resp_size += sizeof(struct channel_list_end_tlv);
+
+ /* Send TLVs over HW channel */
+ memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
+ trigger.vf_pf_msg_valid = 1;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
+ GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PFID),
+ upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
+ lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
+ &zone_data->non_trigger.vf_pf_msg_addr,
+ *((u32 *)&trigger), &zone_data->trigger);
+
+ REG_WR(p_hwfn,
+ (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
+ lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+ REG_WR(p_hwfn,
+ (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
+ upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+ /* The message data must be written first, to prevent trigger before
+ * data is written.
+ */
+ wmb();
+
+ REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
+
+	/* When the PF is done with the response, it writes back to the
+	 * `done' address. Poll until then.
+ */
+ while ((!*done) && time) {
+ msleep(25);
+ time--;
+ }
+
+ if (!*done) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF <-- PF Timeout [Type %d]\n",
+ p_req->first_tlv.tl.type);
+ rc = -EBUSY;
+ goto exit;
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
+ }
+
+exit:
+ mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
+
+ return rc;
+}
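/* The two helpers above define the whole VF->PF channel contract:
 * qed_vf_pf_prep() locks the mailbox and lays down the first TLV, and
 * qed_send_msg2pf() rings the PF, polls for the reply and drops the lock.
 * Every request in this file follows the same skeleton; a minimal sketch
 * (illustrative only -- the real requests below add their own fields):
 */
static int example_send_simple_request(struct qed_hwfn *p_hwfn, u16 tlv_type)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* Clear both mailboxes, take the channel lock, place the request */
	qed_vf_pf_prep(p_hwfn, tlv_type, sizeof(struct vfpf_first_tlv));

	/* Terminate the TLV list */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	/* Ring the PF, wait for its reply, release the channel lock */
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	return (resp->hdr.status == PFVF_STATUS_SUCCESS) ? 0 : -EINVAL;
}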
+
+#define VF_ACQUIRE_THRESH 3
+#define VF_ACQUIRE_MAC_FILTERS 1
+
+static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
+ struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+ u8 rx_count = 1, tx_count = 1, num_sbs = 1;
+ u8 num_mac = VF_ACQUIRE_MAC_FILTERS;
+ bool resources_acquired = false;
+ struct vfpf_acquire_tlv *req;
+ int rc = 0, attempts = 0;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+
+	/* start filling the request */
+ req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ req->resc_request.num_rxqs = rx_count;
+ req->resc_request.num_txqs = tx_count;
+ req->resc_request.num_sbs = num_sbs;
+ req->resc_request.num_mac_filters = num_mac;
+ req->resc_request.num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+
+ req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
+ req->vfdev_info.fw_major = FW_MAJOR_VERSION;
+ req->vfdev_info.fw_minor = FW_MINOR_VERSION;
+ req->vfdev_info.fw_revision = FW_REVISION_VERSION;
+ req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
+
+ /* Fill capability field with any non-deprecated config we support */
+ req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
+
+ /* pf 2 vf bulletin board address */
+ req->bulletin_addr = p_iov->bulletin.phys;
+ req->bulletin_size = p_iov->bulletin.size;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ while (!resources_acquired) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV, "attempting to acquire resources\n");
+
+ /* send acquire request */
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ /* copy acquire response from buffer to p_hwfn */
+ memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));
+
+ attempts++;
+
+ if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
+ /* PF agrees to allocate our resources */
+ if (!(resp->pfdev_info.capabilities &
+ PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
+ DP_INFO(p_hwfn,
+ "PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n");
+ return -EINVAL;
+ }
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
+ resources_acquired = true;
+ } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
+ attempts < VF_ACQUIRE_THRESH) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+				   "PF unwilling to fulfill resource request. Try PF recommended amount\n");
+
+ /* humble our request */
+ req->resc_request.num_txqs = resp->resc.num_txqs;
+ req->resc_request.num_rxqs = resp->resc.num_rxqs;
+ req->resc_request.num_sbs = resp->resc.num_sbs;
+ req->resc_request.num_mac_filters =
+ resp->resc.num_mac_filters;
+ req->resc_request.num_vlan_filters =
+ resp->resc.num_vlan_filters;
+
+ /* Clear response buffer */
+ memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+ } else {
+ DP_ERR(p_hwfn,
+ "PF returned error %d to VF acquisition request\n",
+ resp->hdr.status);
+ return -EAGAIN;
+ }
+ }
+
+ /* Update bulletin board size with response from PF */
+ p_iov->bulletin.size = resp->bulletin_size;
+
+ /* get HW info */
+ p_hwfn->cdev->type = resp->pfdev_info.dev_type;
+ p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;
+
+ p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;
+
+ /* Learn of the possibility of CMT */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
+ DP_NOTICE(p_hwfn, "100g VF\n");
+ p_hwfn->cdev->num_hwfns = 2;
+ }
+ }
+
+ return 0;
+}
+
+int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov;
+ u32 reg;
+
+	/* Set number of hwfns - might be overridden once leading hwfn learns
+ * actual configuration from PF.
+ */
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->cdev->num_hwfns = 1;
+
+ /* Set the doorbell bar. Assumption: regview is set */
+ p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+
+ reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
+
+ reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
+ p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);
+
+ /* Allocate vf sriov info */
+ p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
+ if (!p_iov) {
+		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_vf_iov'\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate vf2pf msg */
+ p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union vfpf_tlvs),
+ &p_iov->vf2pf_request_phys,
+ GFP_KERNEL);
+ if (!p_iov->vf2pf_request) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate `vf2pf_request' DMA memory\n");
+ goto free_p_iov;
+ }
+
+ p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union pfvf_tlvs),
+ &p_iov->pf2vf_reply_phys,
+ GFP_KERNEL);
+ if (!p_iov->pf2vf_reply) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate `pf2vf_reply' DMA memory\n");
+ goto free_vf2pf_request;
+ }
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
+ p_iov->vf2pf_request,
+ (u64) p_iov->vf2pf_request_phys,
+ p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);
+
+ /* Allocate Bulletin board */
+ p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
+ p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov->bulletin.size,
+ &p_iov->bulletin.phys,
+ GFP_KERNEL);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
+ p_iov->bulletin.p_virt,
+ (u64)p_iov->bulletin.phys, p_iov->bulletin.size);
+
+ mutex_init(&p_iov->mutex);
+
+ p_hwfn->vf_iov_info = p_iov;
+
+ p_hwfn->hw_info.personality = QED_PCI_ETH;
+
+ return qed_vf_pf_acquire(p_hwfn);
+
+free_vf2pf_request:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union vfpf_tlvs),
+ p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
+free_p_iov:
+ kfree(p_iov);
+
+ return -ENOMEM;
+}
+
+int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+ u8 rx_qid,
+ u16 sb,
+ u8 sb_index,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, void __iomem **pp_prod)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_start_queue_resp_tlv *resp;
+ struct vfpf_start_rxq_tlv *req;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
+
+ req->rx_qid = rx_qid;
+ req->cqe_pbl_addr = cqe_pbl_addr;
+ req->cqe_pbl_size = cqe_pbl_size;
+ req->rxq_addr = bd_chain_phys_addr;
+ req->hw_sb = sb;
+ req->sb_index = sb_index;
+ req->bd_max_bytes = bd_max_bytes;
+ req->stat_id = -1;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->queue_start;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ /* Learn the address of the producer from the response */
+ if (pp_prod) {
+ u64 init_prod_val = 0;
+
+ *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
+ rx_qid, *pp_prod, resp->offset);
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+ (u32 *)&init_prod_val);
+ }
+
+ return rc;
+}
+
+int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_stop_rxqs_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
+
+ req->rx_qid = rx_qid;
+ req->num_rxqs = 1;
+ req->cqe_completion = cqe_completion;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return rc;
+}
+
+int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+ u16 tx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ dma_addr_t pbl_addr,
+ u16 pbl_size, void __iomem **pp_doorbell)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_start_txq_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
+
+ req->tx_qid = tx_queue_id;
+
+ /* Tx */
+ req->pbl_addr = pbl_addr;
+ req->pbl_size = pbl_size;
+ req->hw_sb = sb;
+ req->sb_index = sb_index;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ if (pp_doorbell) {
+ u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
+
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(cid, DQ_DEMS_LEGACY);
+ }
+
+ return rc;
+}
+
+int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_stop_txqs_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
+
+ req->tx_qid = tx_qid;
+ req->num_txqs = 1;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return rc;
+}
+
+int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+ u8 vport_id,
+ u16 mtu,
+ u8 inner_vlan_removal,
+ enum qed_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe, u8 only_untagged)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_vport_start_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc, i;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));
+
+ req->mtu = mtu;
+ req->vport_id = vport_id;
+ req->inner_vlan_removal = inner_vlan_removal;
+ req->tpa_mode = tpa_mode;
+ req->max_buffers_per_cqe = max_buffers_per_cqe;
+ req->only_untagged = only_untagged;
+
+ /* status blocks */
+ for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
+ if (p_hwfn->sbs_info[i])
+ req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return rc;
+}
+
+int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
+ sizeof(struct vfpf_first_tlv));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return rc;
+}
+
+static bool
+qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ u16 tlv)
+{
+ switch (tlv) {
+ case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
+ return !!(p_data->update_vport_active_rx_flg ||
+ p_data->update_vport_active_tx_flg);
+ case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
+ return !!p_data->update_tx_switching_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
+ return !!p_data->update_inner_vlan_removal_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
+ return !!p_data->update_accept_any_vlan_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_MCAST:
+ return !!p_data->update_approx_mcast_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
+ return !!(p_data->accept_flags.update_rx_mode_config ||
+ p_data->accept_flags.update_tx_mode_config);
+ case CHANNEL_TLV_VPORT_UPDATE_RSS:
+ return !!p_data->rss_params;
+ case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
+ return !!p_data->sge_tpa_params;
+ default:
+ DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n",
+ tlv);
+ return false;
+ }
+}
+
+static void
+qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *p_resp;
+ u16 tlv;
+
+ for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+ tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
+ if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
+ continue;
+
+ p_resp = (struct pfvf_def_resp_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
+ tlv);
+		if (p_resp)
+			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+				   "TLV[%d] Configuration %s\n", tlv,
+				   (p_resp->hdr.status == PFVF_STATUS_SUCCESS) ?
+				   "succeeded" : "failed");
+ }
+}
+
+int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_vport_update_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ u8 update_rx, update_tx;
+ u32 resp_size = 0;
+ u16 size, tlv;
+ int rc;
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ resp_size = sizeof(*resp);
+
+ update_rx = p_params->update_vport_active_rx_flg;
+ update_tx = p_params->update_vport_active_tx_flg;
+
+ /* clear mailbox and prep header tlv */
+ qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));
+
+ /* Prepare extended tlvs */
+ if (update_rx || update_tx) {
+ struct vfpf_vport_update_activate_tlv *p_act_tlv;
+
+ size = sizeof(struct vfpf_vport_update_activate_tlv);
+ p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+ size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (update_rx) {
+ p_act_tlv->update_rx = update_rx;
+ p_act_tlv->active_rx = p_params->vport_active_rx_flg;
+ }
+
+ if (update_tx) {
+ p_act_tlv->update_tx = update_tx;
+ p_act_tlv->active_tx = p_params->vport_active_tx_flg;
+ }
+ }
+
+ if (p_params->update_tx_switching_flg) {
+ struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+
+ size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
+ tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+ p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+ tlv, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
+ }
+
+ if (p_params->update_approx_mcast_flg) {
+ struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+
+ size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
+ p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ memcpy(p_mcast_tlv->bins, p_params->bins,
+ sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+ }
+
+ update_rx = p_params->accept_flags.update_rx_mode_config;
+ update_tx = p_params->accept_flags.update_tx_mode_config;
+
+ if (update_rx || update_tx) {
+ struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+
+ tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+ size = sizeof(struct vfpf_vport_update_accept_param_tlv);
+ p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (update_rx) {
+ p_accept_tlv->update_rx_mode = update_rx;
+ p_accept_tlv->rx_accept_filter =
+ p_params->accept_flags.rx_accept_filter;
+ }
+
+ if (update_tx) {
+ p_accept_tlv->update_tx_mode = update_tx;
+ p_accept_tlv->tx_accept_filter =
+ p_params->accept_flags.tx_accept_filter;
+ }
+ }
+
+ if (p_params->rss_params) {
+ struct qed_rss_params *rss_params = p_params->rss_params;
+ struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+
+ size = sizeof(struct vfpf_vport_update_rss_tlv);
+ p_rss_tlv = qed_add_tlv(p_hwfn,
+ &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_RSS, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (rss_params->update_rss_config)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_CONFIG_FLAG;
+ if (rss_params->update_rss_capabilities)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_CAPS_FLAG;
+ if (rss_params->update_rss_ind_table)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_IND_TABLE_FLAG;
+ if (rss_params->update_rss_key)
+ p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;
+
+ p_rss_tlv->rss_enable = rss_params->rss_enable;
+ p_rss_tlv->rss_caps = rss_params->rss_caps;
+ p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
+ memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
+ sizeof(rss_params->rss_ind_table));
+ memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
+ sizeof(rss_params->rss_key));
+ }
+
+ if (p_params->update_accept_any_vlan_flg) {
+ struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;
+
+ size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
+ tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+ p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+ p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
+ p_any_vlan_tlv->update_accept_any_vlan_flg =
+ p_params->update_accept_any_vlan_flg;
+ }
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
+
+ return rc;
+}
+
+int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_first_tlv *req;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EAGAIN;
+
+ p_hwfn->b_int_enabled = 0;
+
+ return 0;
+}
+
+int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_first_tlv *req;
+ u32 size;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
+ rc = -EAGAIN;
+
+ p_hwfn->b_int_enabled = 0;
+
+ if (p_iov->vf2pf_request)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union vfpf_tlvs),
+ p_iov->vf2pf_request,
+ p_iov->vf2pf_request_phys);
+ if (p_iov->pf2vf_reply)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union pfvf_tlvs),
+ p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
+
+ if (p_iov->bulletin.p_virt) {
+ size = sizeof(struct qed_bulletin_content);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ size,
+ p_iov->bulletin.p_virt, p_iov->bulletin.phys);
+ }
+
+ kfree(p_hwfn->vf_iov_info);
+ p_hwfn->vf_iov_info = NULL;
+
+ return rc;
+}
+
+void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_mcast *p_filter_cmd)
+{
+ struct qed_sp_vport_update_params sp_params;
+ int i;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.update_approx_mcast_flg = 1;
+
+ if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+ for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+ u32 bit;
+
+ bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+ __set_bit(bit, sp_params.bins);
+ }
+ }
+
+ qed_vf_pf_vport_update(p_hwfn, &sp_params);
+}
+
+int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_ucast *p_ucast)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_ucast_filter_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
+ req->opcode = (u8) p_ucast->opcode;
+ req->type = (u8) p_ucast->type;
+ memcpy(req->mac, p_ucast->mac, ETH_ALEN);
+ req->vlan = p_ucast->vlan;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EAGAIN;
+
+ return 0;
+}
+
+int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
+ sizeof(struct vfpf_first_tlv));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+ if (!p_iov) {
+		DP_NOTICE(p_hwfn, "vf_iov_info isn't initialized\n");
+ return 0;
+ }
+
+ return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
+}
+
+int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct qed_bulletin_content shadow;
+ u32 crc, crc_size;
+
+ crc_size = sizeof(p_iov->bulletin.p_virt->crc);
+ *p_change = 0;
+
+ /* Need to guarantee PF is not in the middle of writing it */
+ memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);
+
+ /* If version did not update, no need to do anything */
+ if (shadow.version == p_iov->bulletin_shadow.version)
+ return 0;
+
+ /* Verify the bulletin we see is valid */
+ crc = crc32(0, (u8 *)&shadow + crc_size,
+ p_iov->bulletin.size - crc_size);
+ if (crc != shadow.crc)
+ return -EAGAIN;
+
+ /* Set the shadow bulletin and process it */
+ memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Read a bulletin update %08x\n", shadow.version);
+
+ *p_change = 1;
+
+ return 0;
+}
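/* Counterpart sketch, hedged: the PF-side producer (qed_iov_post_vf_bulletin()
 * in qed_sriov.c, not shown in this hunk) must build exactly what the check
 * above expects -- bump the version and CRC everything past the crc field
 * itself. Roughly:
 */
static void example_seal_bulletin(struct qed_bulletin_content *p_bulletin)
{
	u32 crc_size = sizeof(p_bulletin->crc);

	/* A new version tells qed_vf_read_bulletin() there is work to do */
	p_bulletin->version++;

	/* CRC over the whole structure, excluding the crc field itself */
	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
				sizeof(*p_bulletin) - crc_size);
}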
+
+void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *p_params,
+ struct qed_bulletin_content *p_bulletin)
+{
+ memset(p_params, 0, sizeof(*p_params));
+
+ p_params->speed.autoneg = p_bulletin->req_autoneg;
+ p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
+ p_params->speed.forced_speed = p_bulletin->req_forced_speed;
+ p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
+ p_params->pause.forced_rx = p_bulletin->req_forced_rx;
+ p_params->pause.forced_tx = p_bulletin->req_forced_tx;
+ p_params->loopback_mode = p_bulletin->req_loopback;
+}
+
+void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *params)
+{
+ __qed_vf_get_link_params(p_hwfn, params,
+ &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *p_link,
+ struct qed_bulletin_content *p_bulletin)
+{
+ memset(p_link, 0, sizeof(*p_link));
+
+ p_link->link_up = p_bulletin->link_up;
+ p_link->speed = p_bulletin->speed;
+ p_link->full_duplex = p_bulletin->full_duplex;
+ p_link->an = p_bulletin->autoneg;
+ p_link->an_complete = p_bulletin->autoneg_complete;
+ p_link->parallel_detection = p_bulletin->parallel_detection;
+ p_link->pfc_enabled = p_bulletin->pfc_enabled;
+ p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
+ p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
+ p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
+ p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
+ p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
+}
+
+void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *link)
+{
+ __qed_vf_get_link_state(p_hwfn, link,
+ &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps,
+ struct qed_bulletin_content *p_bulletin)
+{
+ memset(p_link_caps, 0, sizeof(*p_link_caps));
+ p_link_caps->speed_capabilities = p_bulletin->capability_speed;
+}
+
+void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps)
+{
+ __qed_vf_get_link_caps(p_hwfn, p_link_caps,
+ &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
+{
+ *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
+}
+
+void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
+{
+ memcpy(port_mac,
+ p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
+}
+
+void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
+{
+ struct qed_vf_iov *p_vf;
+
+ p_vf = p_hwfn->vf_iov_info;
+ *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
+}
+
+bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
+{
+ struct qed_bulletin_content *bulletin;
+
+ bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+ if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+ return true;
+
+	/* A forced MAC is set by the PF - the VF must not change it */
+	return false;
+}
+
+bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
+ u8 *dst_mac, u8 *p_is_forced)
+{
+ struct qed_bulletin_content *bulletin;
+
+ bulletin = &hwfn->vf_iov_info->bulletin_shadow;
+
+ if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
+ if (p_is_forced)
+ *p_is_forced = 1;
+ } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
+ if (p_is_forced)
+ *p_is_forced = 0;
+ } else {
+ return false;
+ }
+
+ ether_addr_copy(dst_mac, bulletin->mac);
+
+ return true;
+}
+
+void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor,
+ u16 *fw_rev, u16 *fw_eng)
+{
+ struct pf_vf_pfdev_info *info;
+
+ info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;
+
+ *fw_major = info->fw_major;
+ *fw_minor = info->fw_minor;
+ *fw_rev = info->fw_rev;
+ *fw_eng = info->fw_eng;
+}
+
+static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
+{
+ struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
+ u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
+ void *cookie = hwfn->cdev->ops_cookie;
+
+ is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
+ &is_mac_forced);
+ if (is_mac_exist && is_mac_forced && cookie)
+ ops->force_mac(cookie, mac);
+
+ /* Always update link configuration according to bulletin */
+ qed_link_update(hwfn);
+}
+
+void qed_iov_vf_task(struct work_struct *work)
+{
+ struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+ iov_task.work);
+ u8 change = 0;
+
+ if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
+ return;
+
+ /* Handle bulletin board changes */
+ qed_vf_read_bulletin(hwfn, &change);
+ if (change)
+ qed_handle_bulletin_change(hwfn);
+
+ /* As VF is polling bulletin board, need to constantly re-schedule */
+ queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
new file mode 100644
index 0000000..b82fda9
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -0,0 +1,990 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_VF_H
+#define _QED_VF_H
+
+#include "qed_l2.h"
+#include "qed_mcp.h"
+
+#define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY_SIZE 10
+
+struct vf_pf_resc_request {
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+ u16 padding;
+};
+
+struct hw_sb_info {
+ u16 hw_sb_id;
+ u8 sb_qid;
+ u8 padding[5];
+};
+
+#define TLV_BUFFER_SIZE 1024
+
+enum {
+ PFVF_STATUS_WAITING,
+ PFVF_STATUS_SUCCESS,
+ PFVF_STATUS_FAILURE,
+ PFVF_STATUS_NOT_SUPPORTED,
+ PFVF_STATUS_NO_RESOURCE,
+ PFVF_STATUS_FORCED,
+};
+
+/* vf pf channel tlvs */
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+ u16 type;
+ u16 length;
+};
+
+/* header of first vf->pf tlv carries the offset used to calculate response
+ * buffer address
+ */
+struct vfpf_first_tlv {
+ struct channel_tlv tl;
+ u32 padding;
+ u64 reply_address;
+};
+
+/* header of pf->vf tlvs, carries the status of handling the request */
+struct pfvf_tlv {
+ struct channel_tlv tl;
+ u8 status;
+ u8 padding[3];
+};
+
+/* response tlv used for most tlvs */
+struct pfvf_def_resp_tlv {
+ struct pfvf_tlv hdr;
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+};
+
+#define VFPF_ACQUIRE_OS_LINUX (0)
+#define VFPF_ACQUIRE_OS_WINDOWS (1)
+#define VFPF_ACQUIRE_OS_ESX (2)
+#define VFPF_ACQUIRE_OS_SOLARIS (3)
+#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)
+
+struct vfpf_acquire_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_vfdev_info {
+#define VFPF_ACQUIRE_CAP_OBSOLETE (1 << 0)
+#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
+ u64 capabilities;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_revision;
+ u8 fw_engineering;
+ u32 driver_version;
+ u16 opaque_fid; /* ME register value */
+ u8 os_type; /* VFPF_ACQUIRE_OS_* value */
+ u8 padding[5];
+ } vfdev_info;
+
+ struct vf_pf_resc_request resc_request;
+
+ u64 bulletin_addr;
+ u32 bulletin_size;
+ u32 padding;
+};
+
+/* receive side scaling tlv */
+struct vfpf_vport_update_rss_tlv {
+ struct channel_tlv tl;
+
+ u8 update_rss_flags;
+#define VFPF_UPDATE_RSS_CONFIG_FLAG BIT(0)
+#define VFPF_UPDATE_RSS_CAPS_FLAG BIT(1)
+#define VFPF_UPDATE_RSS_IND_TABLE_FLAG BIT(2)
+#define VFPF_UPDATE_RSS_KEY_FLAG BIT(3)
+
+ u8 rss_enable;
+ u8 rss_caps;
+ u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
+ u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ u32 rss_key[T_ETH_RSS_KEY_SIZE];
+};
+
+struct pfvf_storm_stats {
+ u32 address;
+ u32 len;
+};
+
+struct pfvf_stats_info {
+ struct pfvf_storm_stats mstats;
+ struct pfvf_storm_stats pstats;
+ struct pfvf_storm_stats tstats;
+ struct pfvf_storm_stats ustats;
+};
+
+struct pfvf_acquire_resp_tlv {
+ struct pfvf_tlv hdr;
+
+ struct pf_vf_pfdev_info {
+ u32 chip_num;
+ u32 mfw_ver;
+
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_rev;
+ u16 fw_eng;
+
+ u64 capabilities;
+#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED BIT(0)
+#define PFVF_ACQUIRE_CAP_100G BIT(1) /* If set, 100g PF */
+/* There are old PF versions where the PF might mistakenly override the sanity
+ * mechanism [version-based] and allow a VF that can't be supported to pass
+ * the acquisition phase.
+ * To overcome this, PFs now indicate that they're past that point and the new
+ * VFs would fail probe on the older PFs that fail to do so.
+ */
+#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE BIT(2)
+
+ u16 db_size;
+ u8 indices_per_sb;
+ u8 os_type;
+
+ /* These should match the PF's qed_dev values */
+ u16 chip_rev;
+ u8 dev_type;
+
+ u8 padding;
+
+ struct pfvf_stats_info stats_info;
+
+ u8 port_mac[ETH_ALEN];
+ u8 padding2[2];
+ } pfdev_info;
+
+ struct pf_vf_resc {
+#define PFVF_MAX_QUEUES_PER_VF 16
+#define PFVF_MAX_SBS_PER_VF 16
+ struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
+ u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
+ u8 cid[PFVF_MAX_QUEUES_PER_VF];
+
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+ u8 padding[2];
+ } resc;
+
+ u32 bulletin_size;
+ u32 padding;
+};
+
+struct pfvf_start_queue_resp_tlv {
+ struct pfvf_tlv hdr;
+ u32 offset; /* offset to consumer/producer of queue */
+ u8 padding[4];
+};
+
+/* Setup Queue */
+struct vfpf_start_rxq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ /* physical addresses */
+ u64 rxq_addr;
+ u64 deprecated_sge_addr;
+ u64 cqe_pbl_addr;
+
+ u16 cqe_pbl_size;
+ u16 hw_sb;
+ u16 rx_qid;
+ u16 hc_rate; /* desired interrupts per sec. */
+
+ u16 bd_max_bytes;
+ u16 stat_id;
+ u8 sb_index;
+ u8 padding[3];
+};
+
+struct vfpf_start_txq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ /* physical addresses */
+ u64 pbl_addr;
+ u16 pbl_size;
+ u16 stat_id;
+ u16 tx_qid;
+ u16 hw_sb;
+
+ u32 flags; /* VFPF_QUEUE_FLG_X flags */
+ u16 hc_rate; /* desired interrupts per sec. */
+ u8 sb_index;
+ u8 padding[3];
+};
+
+/* Stop RX Queue */
+struct vfpf_stop_rxqs_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u16 rx_qid;
+ u8 num_rxqs;
+ u8 cqe_completion;
+ u8 padding[4];
+};
+
+/* Stop TX Queues */
+struct vfpf_stop_txqs_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u16 tx_qid;
+ u8 num_txqs;
+ u8 padding[5];
+};
+
+struct vfpf_update_rxq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];
+
+ u16 rx_qid;
+ u8 num_rxqs;
+ u8 flags;
+#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG BIT(0)
+#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG BIT(1)
+#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG BIT(2)
+
+ u8 padding[4];
+};
+
+/* Set Queue Filters */
+struct vfpf_q_mac_vlan_filter {
+ u32 flags;
+#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
+#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
+#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
+
+ u8 mac[ETH_ALEN];
+ u16 vlan_tag;
+
+ u8 padding[4];
+};
+
+/* Start a vport */
+struct vfpf_vport_start_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u64 sb_addr[PFVF_MAX_SBS_PER_VF];
+
+ u32 tpa_mode;
+ u16 dep1;
+ u16 mtu;
+
+ u8 vport_id;
+ u8 inner_vlan_removal;
+
+ u8 only_untagged;
+ u8 max_buffers_per_cqe;
+
+ u8 padding[4];
+};
+
+/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
+struct vfpf_vport_update_activate_tlv {
+ struct channel_tlv tl;
+ u8 update_rx;
+ u8 update_tx;
+ u8 active_rx;
+ u8 active_tx;
+};
+
+struct vfpf_vport_update_tx_switch_tlv {
+ struct channel_tlv tl;
+ u8 tx_switching;
+ u8 padding[3];
+};
+
+struct vfpf_vport_update_vlan_strip_tlv {
+ struct channel_tlv tl;
+ u8 remove_vlan;
+ u8 padding[3];
+};
+
+struct vfpf_vport_update_mcast_bin_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+
+ u64 bins[8];
+};
+
+struct vfpf_vport_update_accept_param_tlv {
+ struct channel_tlv tl;
+ u8 update_rx_mode;
+ u8 update_tx_mode;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+};
+
+struct vfpf_vport_update_accept_any_vlan_tlv {
+ struct channel_tlv tl;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+
+ u8 padding[2];
+};
+
+struct vfpf_vport_update_sge_tpa_tlv {
+ struct channel_tlv tl;
+
+ u16 sge_tpa_flags;
+#define VFPF_TPA_IPV4_EN_FLAG BIT(0)
+#define VFPF_TPA_IPV6_EN_FLAG BIT(1)
+#define VFPF_TPA_PKT_SPLIT_FLAG BIT(2)
+#define VFPF_TPA_HDR_DATA_SPLIT_FLAG BIT(3)
+#define VFPF_TPA_GRO_CONSIST_FLAG BIT(4)
+
+ u8 update_sge_tpa_flags;
+#define VFPF_UPDATE_SGE_DEPRECATED_FLAG BIT(0)
+#define VFPF_UPDATE_TPA_EN_FLAG BIT(1)
+#define VFPF_UPDATE_TPA_PARAM_FLAG BIT(2)
+
+ u8 max_buffers_per_cqe;
+
+ u16 deprecated_sge_buff_size;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
+
+ u8 tpa_max_aggs_num;
+ u8 padding[7];
+};
+
+/* Primary tlv as a header for various extended tlvs for
+ * various functionalities in vport update ramrod.
+ */
+struct vfpf_vport_update_tlv {
+ struct vfpf_first_tlv first_tlv;
+};
+
+struct vfpf_ucast_filter_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u8 opcode;
+ u8 type;
+
+ u8 mac[ETH_ALEN];
+
+ u16 vlan;
+ u16 padding[3];
+};
+
+struct tlv_buffer_size {
+ u8 tlv_buffer[TLV_BUFFER_SIZE];
+};
+
+union vfpf_tlvs {
+ struct vfpf_first_tlv first_tlv;
+ struct vfpf_acquire_tlv acquire;
+ struct vfpf_start_rxq_tlv start_rxq;
+ struct vfpf_start_txq_tlv start_txq;
+ struct vfpf_stop_rxqs_tlv stop_rxqs;
+ struct vfpf_stop_txqs_tlv stop_txqs;
+ struct vfpf_update_rxq_tlv update_rxq;
+ struct vfpf_vport_start_tlv start_vport;
+ struct vfpf_vport_update_tlv vport_update;
+ struct vfpf_ucast_filter_tlv ucast_filter;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+union pfvf_tlvs {
+ struct pfvf_def_resp_tlv default_resp;
+ struct pfvf_acquire_resp_tlv acquire_resp;
+ struct tlv_buffer_size tlv_buf_size;
+ struct pfvf_start_queue_resp_tlv queue_start;
+};
+
+enum qed_bulletin_bit {
+ /* Alert the VF that a forced MAC was set by the PF */
+ MAC_ADDR_FORCED = 0,
+ /* Alert the VF that a forced VLAN was set by the PF */
+ VLAN_ADDR_FORCED = 2,
+
+ /* Indicate that `default_only_untagged' contains actual data */
+ VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
+ VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,
+
+	/* Alert the VF that a suggested MAC was sent by the PF.
+ * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set.
+ */
+ VFPF_BULLETIN_MAC_ADDR = 5
+};
+
+struct qed_bulletin_content {
+	/* crc of structure to ensure it is not read mid-update */
+ u32 crc;
+
+ u32 version;
+
+ /* bitmap indicating which fields hold valid values */
+ u64 valid_bitmap;
+
+ /* used for MAC_ADDR or MAC_ADDR_FORCED */
+ u8 mac[ETH_ALEN];
+
+ /* If valid, 1 => only untagged Rx if no vlan is configured */
+ u8 default_only_untagged;
+ u8 padding;
+
+ /* The following is a 'copy' of qed_mcp_link_state,
+ * qed_mcp_link_params and qed_mcp_link_capabilities. Since it's
+ * possible the structs will increase further along the road we cannot
+ * have it here; Instead we need to have all of its fields.
+ */
+ u8 req_autoneg;
+ u8 req_autoneg_pause;
+ u8 req_forced_rx;
+ u8 req_forced_tx;
+ u8 padding2[4];
+
+ u32 req_adv_speed;
+ u32 req_forced_speed;
+ u32 req_loopback;
+ u32 padding3;
+
+ u8 link_up;
+ u8 full_duplex;
+ u8 autoneg;
+ u8 autoneg_complete;
+ u8 parallel_detection;
+ u8 pfc_enabled;
+ u8 partner_tx_flow_ctrl_en;
+ u8 partner_rx_flow_ctrl_en;
+ u8 partner_adv_pause;
+ u8 sfp_tx_fault;
+ u8 padding4[6];
+
+ u32 speed;
+ u32 partner_adv_speed;
+
+ u32 capability_speed;
+
+ /* Forced vlan */
+ u16 pvid;
+ u16 padding5;
+};
+
+struct qed_bulletin {
+ dma_addr_t phys;
+ struct qed_bulletin_content *p_virt;
+ u32 size;
+};
+
+enum {
+ CHANNEL_TLV_NONE, /* ends tlv sequence */
+ CHANNEL_TLV_ACQUIRE,
+ CHANNEL_TLV_VPORT_START,
+ CHANNEL_TLV_VPORT_UPDATE,
+ CHANNEL_TLV_VPORT_TEARDOWN,
+ CHANNEL_TLV_START_RXQ,
+ CHANNEL_TLV_START_TXQ,
+ CHANNEL_TLV_STOP_RXQS,
+ CHANNEL_TLV_STOP_TXQS,
+ CHANNEL_TLV_UPDATE_RXQ,
+ CHANNEL_TLV_INT_CLEANUP,
+ CHANNEL_TLV_CLOSE,
+ CHANNEL_TLV_RELEASE,
+ CHANNEL_TLV_LIST_END,
+ CHANNEL_TLV_UCAST_FILTER,
+ CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+ CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
+ CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
+ CHANNEL_TLV_VPORT_UPDATE_MCAST,
+ CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
+ CHANNEL_TLV_VPORT_UPDATE_RSS,
+ CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
+ CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+ CHANNEL_TLV_MAX,
+
+ /* Required for iterating over vport-update tlvs.
+	 * Will break in case of non-sequential vport-update tlvs.
+ */
+ CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
+};
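/* Sketch of how a buffer built from these TLV types is traversed; the real
 * helpers are qed_dp_tlv_list() and qed_iov_search_list_tlvs() declared in
 * qed_sriov.h. The function below is illustrative only:
 */
static struct channel_tlv *example_find_tlv(void *tlv_buf, u16 wanted_type)
{
	struct channel_tlv *tlv = tlv_buf;

	/* Each TLV carries its own length; CHANNEL_TLV_LIST_END terminates */
	while (tlv->type != CHANNEL_TLV_LIST_END) {
		if (tlv->type == wanted_type)
			return tlv;

		if (!tlv->length)
			return NULL;	/* malformed chain - avoid looping */

		tlv = (struct channel_tlv *)((u8 *)tlv + tlv->length);
	}

	return NULL;
}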
+
+/* This data is held in the qed_hwfn structure for VFs only. */
+struct qed_vf_iov {
+ union vfpf_tlvs *vf2pf_request;
+ dma_addr_t vf2pf_request_phys;
+ union pfvf_tlvs *pf2vf_reply;
+ dma_addr_t pf2vf_reply_phys;
+
+ /* Should be taken whenever the mailbox buffers are accessed */
+ struct mutex mutex;
+ u8 *offset;
+
+ /* Bulletin Board */
+ struct qed_bulletin bulletin;
+ struct qed_bulletin_content bulletin_shadow;
+
+ /* we set aside a copy of the acquire response */
+ struct pfvf_acquire_resp_tlv acquire_resp;
+};
+
+#ifdef CONFIG_QED_SRIOV
+/**
+ * @brief Read the VF bulletin and act on it if needed
+ *
+ * @param p_hwfn
+ * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise.
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);
+
+/**
+ * @brief Get link parameters for VF from qed
+ *
+ * @param p_hwfn
+ * @param params - the link params structure to be filled for the VF
+ */
+void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *params);
+
+/**
+ * @brief Get link state for VF from qed
+ *
+ * @param p_hwfn
+ * @param link - the link state structure to be filled for the VF
+ */
+void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *link);
+
+/**
+ * @brief Get link capabilities for VF from qed
+ *
+ * @param p_hwfn
+ * @param p_link_caps - the link capabilities structure to be filled for the VF
+ */
+void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps);
+
+/**
+ * @brief Get number of Rx queues allocated for VF by qed
+ *
+ * @param p_hwfn
+ * @param num_rxqs - allocated RX queues
+ */
+void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
+
+/**
+ * @brief Get port mac address for VF
+ *
+ * @param p_hwfn
+ * @param port_mac - destination location for port mac
+ */
+void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);
+
+/**
+ * @brief Get number of VLAN filters allocated for VF by qed
+ *
+ * @param p_hwfn
+ * @param num_vlan_filters - allocated VLAN filters
+ */
+void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
+ u8 *num_vlan_filters);
+
+/**
+ * @brief Check if VF can set a MAC address
+ *
+ * @param p_hwfn
+ * @param mac
+ *
+ * @return bool
+ */
+bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);
+
+/**
+ * @brief Set firmware version information in dev_info from the VF's acquire response tlv
+ *
+ * @param p_hwfn
+ * @param fw_major
+ * @param fw_minor
+ * @param fw_rev
+ * @param fw_eng
+ */
+void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor,
+ u16 *fw_rev, u16 *fw_eng);
+
+/**
+ * @brief hw preparation for VF
+ * sends ACQUIRE message
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief VF - start the RX Queue by sending a message to the PF
+ * @param p_hwfn
+ * @param cid - zero based within the VF
+ * @param rx_queue_id - zero based within the VF
+ * @param sb - VF status block for this queue
+ * @param sb_index - Index within the status block
+ * @param bd_max_bytes - maximum number of bytes per bd
+ * @param bd_chain_phys_addr - physical address of bd chain
+ * @param cqe_pbl_addr - physical address of pbl
+ * @param cqe_pbl_size - pbl size
+ * @param pp_prod - pointer to the producer to be
+ * used in fastpath
+ *
+ * @return int
+ */
+int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+ u8 rx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, void __iomem **pp_prod);
+
+/**
+ * @brief VF - start the TX queue by sending a message to the
+ * PF.
+ *
+ * @param p_hwfn
+ * @param tx_queue_id - zero based within the VF
+ * @param sb - status block for this queue
+ * @param sb_index - index within the status block
+ * @param pbl_addr - physical address of the tx pbl
+ * @param pbl_size - pbl size
+ * @param pp_doorbell - pointer to address to which to
+ *		write the doorbell
+ *
+ * @return int
+ */
+int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+ u16 tx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ dma_addr_t pbl_addr,
+ u16 pbl_size, void __iomem **pp_doorbell);
+
+/**
+ * @brief VF - stop the RX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param rx_qid
+ * @param cqe_completion
+ *
+ * @return int
+ */
+int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_qid, bool cqe_completion);
+
+/**
+ * @brief VF - stop the TX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param tx_qid
+ *
+ * @return int
+ */
+int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid);
+
+/**
+ * @brief VF - send a vport update command
+ *
+ * @param p_hwfn
+ * @param params
+ *
+ * @return int
+ */
+int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params);
+
+/**
+ *
+ * @brief VF - send a close message to PF
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief VF - free the VF's memories
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
+ *        sb_id. For VFs, IGU SBs don't have to be contiguous.
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return u16
+ */
+u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * @brief qed_vf_pf_vport_start - perform vport start for VF.
+ *
+ * @param p_hwfn
+ * @param vport_id
+ * @param mtu
+ * @param inner_vlan_removal
+ * @param tpa_mode
+ * @param max_buffers_per_cqe
+ * @param only_untagged - default behavior regarding vlan acceptance
+ *
+ * @return int
+ */
+int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+ u8 vport_id,
+ u16 mtu,
+ u8 inner_vlan_removal,
+ enum qed_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe, u8 only_untagged);
+
+/**
+ * @brief qed_vf_pf_vport_stop - stop the VF's vport
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);
+
+int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_ucast *p_param);
+
+void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_mcast *p_filter_cmd);
+
+/**
+ * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief - return the link params in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_params - pointer to a struct to fill with link params
+ * @param p_bulletin
+ */
+void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *p_params,
+ struct qed_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link state in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_link - pointer to a struct to fill with link state
+ * @param p_bulletin
+ */
+void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *p_link,
+ struct qed_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link capabilities in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_link_caps - pointer to a struct to fill with link capabilities
+ * @param p_bulletin
+ */
+void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps,
+ struct qed_bulletin_content *p_bulletin);
+
+void qed_iov_vf_task(struct work_struct *work);
+#else
+static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *params)
+{
+}
+
+static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *link)
+{
+}
+
+static inline void
+qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps)
+{
+}
+
+static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
+{
+}
+
+static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
+{
+}
+
+static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
+ u8 *num_vlan_filters)
+{
+}
+
+static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
+{
+ return false;
+}
+
+static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor,
+ u16 *fw_rev, u16 *fw_eng)
+{
+}
+
+static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+ u8 rx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ u16 bd_max_bytes,
+				       dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, void __iomem **pp_prod)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+ u16 tx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ dma_addr_t pbl_addr,
+ u16 pbl_size, void __iomem **pp_doorbell)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_qid, bool cqe_completion)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
+{
+ return -EINVAL;
+}
+
+static inline int
+qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+ return 0;
+}
+
+static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+ u8 vport_id,
+ u16 mtu,
+ u8 inner_vlan_removal,
+ enum qed_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe,
+ u8 only_untagged)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_ucast *p_param)
+{
+ return -EINVAL;
+}
+
+static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_mcast *p_filter_cmd)
+{
+}
+
+static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline void
+__qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+			 struct qed_mcp_link_params *p_params,
+			 struct qed_bulletin_content *p_bulletin)
+{
+}
+
+static inline void
+__qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+			struct qed_mcp_link_state *p_link,
+			struct qed_bulletin_content *p_bulletin)
+{
+}
+
+static inline void
+__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps,
+ struct qed_bulletin_content *p_bulletin)
+{
+}
+
+static inline void qed_iov_vf_task(struct work_struct *work)
+{
+}
+#endif
+
+#endif
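The header above only declares the VF-to-PF channel; no caller appears in this hunk. As a rough orientation, a minimal bring-up sketch of how these entry points might be sequenced (ACQUIRE, vport start, one Rx/Tx queue pair) could look as follows. It assumes qed_vf.h and the qed core headers are included, that QED_TPA_MODE_NONE is a valid enum qed_tpa_mode value, and that the vport/queue/SB indices, MTU and buffer size are placeholders; the DMA addresses must come from the caller's own chain allocations. This is an illustration, not code from the patch.

static int example_vf_queue_bringup(struct qed_hwfn *p_hwfn,
				    dma_addr_t rx_bd_chain,
				    dma_addr_t rx_cqe_pbl, u16 rx_cqe_pbl_size,
				    dma_addr_t tx_pbl, u16 tx_pbl_size)
{
	void __iomem *prod, *doorbell;
	int rc;

	/* Negotiate resources with the PF (sends the ACQUIRE message) */
	rc = qed_vf_hw_prepare(p_hwfn);
	if (rc)
		return rc;

	/* Open vport 0: 1500-byte MTU, inner-vlan removal, no TPA,
	 * one buffer per CQE, accept only untagged traffic by default.
	 */
	rc = qed_vf_pf_vport_start(p_hwfn, 0, 1500, 1, QED_TPA_MODE_NONE, 1, 1);
	if (rc)
		return rc;

	/* Rx queue 0 on status block 0, index 0; 2kB buffers */
	rc = qed_vf_pf_rxq_start(p_hwfn, 0, 0, 0, 2048, rx_bd_chain,
				 rx_cqe_pbl, rx_cqe_pbl_size, &prod);
	if (rc)
		goto err_vport;

	/* Tx queue 0 on status block 0, index 1 */
	rc = qed_vf_pf_txq_start(p_hwfn, 0, 0, 1, tx_pbl, tx_pbl_size,
				 &doorbell);
	if (rc)
		goto err_rxq;

	return 0;

err_rxq:
	qed_vf_pf_rxq_stop(p_hwfn, 0, false);
err_vport:
	qed_vf_pf_vport_stop(p_hwfn);
	return rc;
}

Teardown of a running VF would go the other way: qed_vf_pf_txq_stop() and qed_vf_pf_rxq_stop(), then qed_vf_pf_vport_stop(), and finally qed_vf_pf_release() to free the VF's memories.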
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index d023251..47d6b22 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -25,15 +25,13 @@
#define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 7
-#define QEDE_REVISION_VERSION 0
-#define QEDE_ENGINEERING_VERSION 0
+#define QEDE_REVISION_VERSION 1
+#define QEDE_ENGINEERING_VERSION 20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
__stringify(QEDE_MINOR_VERSION) "." \
__stringify(QEDE_REVISION_VERSION) "." \
__stringify(QEDE_ENGINEERING_VERSION)
-#define QEDE_ETH_INTERFACE_VERSION 300
-
#define DRV_MODULE_SYM qede
struct qede_stats {
@@ -61,16 +59,16 @@ struct qede_stats {
/* port */
u64 rx_64_byte_packets;
- u64 rx_127_byte_packets;
- u64 rx_255_byte_packets;
- u64 rx_511_byte_packets;
- u64 rx_1023_byte_packets;
- u64 rx_1518_byte_packets;
- u64 rx_1522_byte_packets;
- u64 rx_2047_byte_packets;
- u64 rx_4095_byte_packets;
- u64 rx_9216_byte_packets;
- u64 rx_16383_byte_packets;
+ u64 rx_65_to_127_byte_packets;
+ u64 rx_128_to_255_byte_packets;
+ u64 rx_256_to_511_byte_packets;
+ u64 rx_512_to_1023_byte_packets;
+ u64 rx_1024_to_1518_byte_packets;
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
@@ -114,6 +112,10 @@ struct qede_dev {
u32 dp_module;
u8 dp_level;
+ u32 flags;
+#define QEDE_FLAG_IS_VF BIT(0)
+#define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF))
+
const struct qed_eth_ops *ops;
struct qed_dev_eth_info dev_info;
@@ -156,6 +158,10 @@ struct qede_dev {
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
struct qede_stats stats;
+#define QEDE_RSS_INDIR_INITED BIT(0)
+#define QEDE_RSS_KEY_INITED BIT(1)
+#define QEDE_RSS_CAPS_INITED BIT(2)
+ u32 rss_params_inited; /* bit-field to track initialized rss params */
struct qed_update_vport_rss_params rss_params;
u16 q_num_rx_buffers; /* Must be a power of two */
u16 q_num_tx_buffers; /* Must be a power of two */
@@ -167,6 +173,8 @@ struct qede_dev {
bool accept_any_vlan;
struct delayed_work sp_task;
unsigned long sp_flags;
+ u16 vxlan_dst_port;
+ u16 geneve_dst_port;
};
enum QEDE_STATE {
@@ -286,8 +294,11 @@ struct qede_fastpath {
#define QEDE_CSUM_ERROR BIT(0)
#define QEDE_CSUM_UNNECESSARY BIT(1)
+#define QEDE_TUNN_CSUM_UNNECESSARY BIT(2)
-#define QEDE_SP_RX_MODE 1
+#define QEDE_SP_RX_MODE 1
+#define QEDE_SP_VXLAN_PORT_CONFIG 2
+#define QEDE_SP_GENEVE_PORT_CONFIG 3
union qede_reload_args {
u16 mtu;
@@ -301,6 +312,10 @@ void qede_reload(struct qede_dev *edev,
union qede_reload_args *args);
int qede_change_mtu(struct net_device *dev, int new_mtu);
void qede_fill_by_demand_stats(struct qede_dev *edev);
+bool qede_has_rx_work(struct qede_rx_queue *rxq);
+int qede_txq_has_work(struct qede_tx_queue *txq);
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
+ u8 count);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index c49dc10..1bc7535 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -9,6 +9,7 @@
#include <linux/version.h>
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/pci.h>
@@ -27,6 +28,9 @@
#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
#define QEDE_RQSTAT(stat_name) \
{QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
+
+#define QEDE_SELFTEST_POLL_COUNT 100
+
static const struct {
u64 offset;
char string[ETH_GSTRING_LEN];
@@ -59,16 +63,16 @@ static const struct {
QEDE_STAT(tx_bcast_pkts),
QEDE_PF_STAT(rx_64_byte_packets),
- QEDE_PF_STAT(rx_127_byte_packets),
- QEDE_PF_STAT(rx_255_byte_packets),
- QEDE_PF_STAT(rx_511_byte_packets),
- QEDE_PF_STAT(rx_1023_byte_packets),
- QEDE_PF_STAT(rx_1518_byte_packets),
- QEDE_PF_STAT(rx_1522_byte_packets),
- QEDE_PF_STAT(rx_2047_byte_packets),
- QEDE_PF_STAT(rx_4095_byte_packets),
- QEDE_PF_STAT(rx_9216_byte_packets),
- QEDE_PF_STAT(rx_16383_byte_packets),
+ QEDE_PF_STAT(rx_65_to_127_byte_packets),
+ QEDE_PF_STAT(rx_128_to_255_byte_packets),
+ QEDE_PF_STAT(rx_256_to_511_byte_packets),
+ QEDE_PF_STAT(rx_512_to_1023_byte_packets),
+ QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
+ QEDE_PF_STAT(rx_1519_to_1522_byte_packets),
+ QEDE_PF_STAT(rx_1519_to_2047_byte_packets),
+ QEDE_PF_STAT(rx_2048_to_4095_byte_packets),
+ QEDE_PF_STAT(rx_4096_to_9216_byte_packets),
+ QEDE_PF_STAT(rx_9217_to_16383_byte_packets),
QEDE_PF_STAT(tx_64_byte_packets),
QEDE_PF_STAT(tx_65_to_127_byte_packets),
QEDE_PF_STAT(tx_128_to_255_byte_packets),
@@ -116,11 +120,39 @@ static const struct {
#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
+enum {
+ QEDE_PRI_FLAG_CMT,
+ QEDE_PRI_FLAG_LEN,
+};
+
+static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
+ "Coupled-Function",
+};
+
+enum qede_ethtool_tests {
+ QEDE_ETHTOOL_INT_LOOPBACK,
+ QEDE_ETHTOOL_INTERRUPT_TEST,
+ QEDE_ETHTOOL_MEMORY_TEST,
+ QEDE_ETHTOOL_REGISTER_TEST,
+ QEDE_ETHTOOL_CLOCK_TEST,
+ QEDE_ETHTOOL_TEST_MAX
+};
+
+static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
+ "Internal loopback (offline)",
+ "Interrupt (online)\t",
+ "Memory (online)\t\t",
+ "Register (online)\t",
+ "Clock (online)\t\t",
+};
+
static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
int i, j, k;
for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
+ if (IS_VF(edev) && qede_stats_arr[i].pf_only)
+ continue;
strcpy(buf + j * ETH_GSTRING_LEN,
qede_stats_arr[i].string);
j++;
@@ -139,6 +171,14 @@ static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
case ETH_SS_STATS:
qede_get_strings_stats(edev, buf);
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(buf, qede_private_arr,
+ ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN);
+ break;
+ case ETH_SS_TEST:
+ memcpy(buf, qede_tests_str_arr,
+ ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX);
+ break;
default:
DP_VERBOSE(edev, QED_MSG_DEBUG,
"Unsupported stringset 0x%08x\n", stringset);
@@ -156,8 +196,11 @@ static void qede_get_ethtool_stats(struct net_device *dev,
mutex_lock(&edev->qede_lock);
- for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++)
+ for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
+ if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
+ continue;
buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
+ }
for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
buf[cnt] = 0;
@@ -176,8 +219,18 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
switch (stringset) {
case ETH_SS_STATS:
- return num_stats + QEDE_NUM_RQSTATS;
+ if (IS_VF(edev)) {
+ int i;
+ for (i = 0; i < QEDE_NUM_STATS; i++)
+ if (qede_stats_arr[i].pf_only)
+ num_stats--;
+ }
+ return num_stats + QEDE_NUM_RQSTATS;
+ case ETH_SS_PRIV_FLAGS:
+ return QEDE_PRI_FLAG_LEN;
+ case ETH_SS_TEST:
+ return QEDE_ETHTOOL_TEST_MAX;
default:
DP_VERBOSE(edev, QED_MSG_DEBUG,
"Unsupported stringset 0x%08x\n", stringset);
@@ -185,6 +238,13 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
}
}
+static u32 qede_get_priv_flags(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT;
+}
+
static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct qede_dev *edev = netdev_priv(dev);
@@ -217,9 +277,9 @@ static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct qed_link_params params;
u32 speed;
- if (!edev->dev_info.common.is_mf_default) {
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
DP_INFO(edev,
- "Link parameters can not be changed in non-default mode\n");
+ "Link settings are not allowed to be changed\n");
return -EOPNOTSUPP;
}
@@ -328,6 +388,12 @@ static int qede_nway_reset(struct net_device *dev)
struct qed_link_output current_link;
struct qed_link_params link_params;
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
+ DP_INFO(edev,
+ "Link settings are not allowed to be changed\n");
+ return -EOPNOTSUPP;
+ }
+
if (!netif_running(dev))
return 0;
@@ -428,9 +494,9 @@ static int qede_set_pauseparam(struct net_device *dev,
struct qed_link_params params;
struct qed_link_output current_link;
- if (!edev->dev_info.common.is_mf_default) {
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
DP_INFO(edev,
- "Pause parameters can not be updated in non-default mode\n");
+ "Pause settings are not allowed to be changed\n");
return -EOPNOTSUPP;
}
@@ -569,6 +635,497 @@ static int qede_set_phys_id(struct net_device *dev,
return 0;
}
+static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V6_FLOW:
+ if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ info->data = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rules __always_unused)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = edev->num_rss;
+ return 0;
+ case ETHTOOL_GRXFH:
+ return qede_get_rss_flags(edev, info);
+ default:
+ DP_ERR(edev, "Command parameters not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+ struct qed_update_vport_params vport_update_params;
+ u8 set_caps = 0, clr_caps = 0;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ info->flow_type, info->data);
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ /* For TCP only 4-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+ case UDP_V4_FLOW:
+ /* For UDP either 2-tuple hash or 4-tuple hash is supported */
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ set_caps = QED_RSS_IPV4_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple enabled\n");
+ } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+ clr_caps = QED_RSS_IPV4_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple disabled\n");
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ /* For UDP either 2-tuple hash or 4-tuple hash is supported */
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ set_caps = QED_RSS_IPV6_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple enabled\n");
+ } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+ clr_caps = QED_RSS_IPV6_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple disabled\n");
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ /* For IP only 2-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IP_USER_FLOW:
+ case ETHER_FLOW:
+ /* RSS is not supported for these protocols */
+ if (info->data) {
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ /* No action is needed if there is no change in the rss capability */
+ if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps &
+ ~clr_caps) | set_caps))
+ return 0;
+
+ /* Update internal configuration */
+ edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) |
+ set_caps;
+ edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+
+ /* Re-configure if possible */
+ if (netif_running(edev->ndev)) {
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.update_rss_flg = 1;
+ vport_update_params.vport_id = 0;
+ memcpy(&vport_update_params.rss_params, &edev->rss_params,
+ sizeof(vport_update_params.rss_params));
+ return edev->ops->vport_update(edev->cdev,
+ &vport_update_params);
+ }
+
+ return 0;
+}
+
+static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ return qede_set_rss_flags(edev, info);
+ default:
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static u32 qede_get_rxfh_indir_size(struct net_device *dev)
+{
+ return QED_RSS_IND_TABLE_SIZE;
+}
+
+static u32 qede_get_rxfh_key_size(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return sizeof(edev->rss_params.rss_key);
+}
+
+static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int i;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (!indir)
+ return 0;
+
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
+ indir[i] = edev->rss_params.rss_ind_table[i];
+
+ if (key)
+ memcpy(key, edev->rss_params.rss_key,
+ qede_get_rxfh_key_size(dev));
+
+ return 0;
+}
+
+static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct qed_update_vport_params vport_update_params;
+ struct qede_dev *edev = netdev_priv(dev);
+ int i;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (!indir && !key)
+ return 0;
+
+ if (indir) {
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
+ edev->rss_params.rss_ind_table[i] = indir[i];
+ edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+ }
+
+ if (key) {
+ memcpy(&edev->rss_params.rss_key, key,
+ qede_get_rxfh_key_size(dev));
+ edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+ }
+
+ if (netif_running(edev->ndev)) {
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.update_rss_flg = 1;
+ vport_update_params.vport_id = 0;
+ memcpy(&vport_update_params.rss_params, &edev->rss_params,
+ sizeof(vport_update_params.rss_params));
+ return edev->ops->vport_update(edev->cdev,
+ &vport_update_params);
+ }
+
+ return 0;
+}
+
+/* This function enables the interrupt generation and the NAPI on the device */
+static void qede_netif_start(struct qede_dev *edev)
+{
+ int i;
+
+ if (!netif_running(edev->ndev))
+ return;
+
+ for_each_rss(i) {
+ /* Update and reenable interrupts */
+ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
+ napi_enable(&edev->fp_array[i].napi);
+ }
+}
+
+/* This function disables the NAPI and the interrupt generation on the device */
+static void qede_netif_stop(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_rss(i) {
+ napi_disable(&edev->fp_array[i].napi);
+ /* Disable interrupts */
+ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
+ }
+}
+
+static int qede_selftest_transmit_traffic(struct qede_dev *edev,
+ struct sk_buff *skb)
+{
+ struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0];
+ struct eth_tx_1st_bd *first_bd;
+ dma_addr_t mapping;
+ int i, idx, val;
+
+ /* Fill the entry in the SW ring and the BDs in the FW ring */
+ idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ txq->sw_tx_ring[idx].skb = skb;
+ first_bd = qed_chain_produce(&txq->tx_pbl);
+ memset(first_bd, 0, sizeof(*first_bd));
+ val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+ first_bd->data.bd_flags.bitfields = val;
+
+ /* Map skb linear data for DMA and set in the first BD */
+ mapping = dma_map_single(&edev->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ DP_NOTICE(edev, "SKB mapping failed\n");
+ return -ENOMEM;
+ }
+ BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+ /* update the first BD with the actual num BDs */
+ first_bd->data.nbds = 1;
+ txq->sw_tx_prod++;
+ /* 'next page' entries are counted in the producer value */
+ val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
+ txq->tx_db.data.bd_prod = val;
+
+	/* wmb makes sure that the BD data is updated before updating the
+	 * producer; otherwise the FW may read stale data from the BDs.
+ */
+ wmb();
+ barrier();
+ writel(txq->tx_db.raw, txq->doorbell_addr);
+
+ /* mmiowb is needed to synchronize doorbell writes from more than one
+	 * processor. It guarantees that the write arrives at the device before
+	 * the queue lock is released and another start_xmit is called (possibly
+	 * on another CPU). Without this barrier, the next doorbell can bypass
+	 * this one. This is applicable to IA64/Altix systems.
+ */
+ mmiowb();
+
+ for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
+ if (qede_txq_has_work(txq))
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (!qede_txq_has_work(txq)) {
+ DP_NOTICE(edev, "Tx completion didn't happen\n");
+ return -1;
+ }
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+ dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
+ txq->sw_tx_cons++;
+ txq->sw_tx_ring[idx].skb = NULL;
+
+ return 0;
+}
+
+static int qede_selftest_receive_traffic(struct qede_dev *edev)
+{
+ struct qede_rx_queue *rxq = edev->fp_array[0].rxq;
+ u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
+ struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ struct sw_rx_data *sw_rx_data;
+ union eth_rx_cqe *cqe;
+ u8 *data_ptr;
+ int i;
+
+	/* The packet is expected to be received on rx-queue 0 even though RSS
+	 * is enabled, because queue 0 is configured as the default queue and
+	 * the loopback traffic is not IP.
+ */
+ for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
+ if (qede_has_rx_work(rxq))
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (!qede_has_rx_work(rxq)) {
+ DP_NOTICE(edev, "Failed to receive the traffic\n");
+ return -1;
+ }
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+	/* Memory barrier to prevent the CPU from doing speculative reads of the
+	 * CQE/BD before reading hw_comp_cons. If the CQE is read before the FW
+	 * writes it, and the FW then writes the CQE and SB before the CPU reads
+	 * hw_comp_cons, the CPU would end up using a stale CQE.
+ */
+ rmb();
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
+ data_ptr = (u8 *)(page_address(sw_rx_data->data) +
+ fp_cqe->placement_offset + sw_rx_data->page_offset);
+ for (i = ETH_HLEN; i < len; i++)
+ if (data_ptr[i] != (unsigned char)(i & 0xff)) {
+ DP_NOTICE(edev, "Loopback test failed\n");
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+ return -1;
+ }
+
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+
+ return 0;
+}
+
+static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
+{
+ struct qed_link_params link_params;
+ struct sk_buff *skb = NULL;
+ int rc = 0, i;
+ u32 pkt_size;
+ u8 *packet;
+
+ if (!netif_running(edev->ndev)) {
+ DP_NOTICE(edev, "Interface is down\n");
+ return -EINVAL;
+ }
+
+ qede_netif_stop(edev);
+
+ /* Bring up the link in Loopback mode */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
+ link_params.loopback_mode = loopback_mode;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+ /* Wait for loopback configuration to apply */
+ msleep_interruptible(500);
+
+ /* prepare the loopback packet */
+ pkt_size = edev->ndev->mtu + ETH_HLEN;
+
+ skb = netdev_alloc_skb(edev->ndev, pkt_size);
+ if (!skb) {
+ DP_INFO(edev, "Can't allocate skb\n");
+ rc = -ENOMEM;
+ goto test_loopback_exit;
+ }
+ packet = skb_put(skb, pkt_size);
+ ether_addr_copy(packet, edev->ndev->dev_addr);
+ ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr);
+ memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN)));
+ for (i = ETH_HLEN; i < pkt_size; i++)
+ packet[i] = (unsigned char)(i & 0xff);
+
+ rc = qede_selftest_transmit_traffic(edev, skb);
+ if (rc)
+ goto test_loopback_exit;
+
+ rc = qede_selftest_receive_traffic(edev);
+ if (rc)
+ goto test_loopback_exit;
+
+ DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n");
+
+test_loopback_exit:
+ dev_kfree_skb(skb);
+
+ /* Bring up the link in Normal mode */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
+ link_params.loopback_mode = QED_LINK_LOOPBACK_NONE;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+ /* Wait for loopback configuration to apply */
+ msleep_interruptible(500);
+
+ qede_netif_start(edev);
+
+ return rc;
+}
+
+static void qede_self_test(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Self-test command parameters: offline = %d, external_lb = %d\n",
+ (etest->flags & ETH_TEST_FL_OFFLINE),
+ (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2);
+
+ memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX);
+
+ if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ if (qede_selftest_run_loopback(edev,
+ QED_LINK_LOOPBACK_INT_PHY)) {
+ buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ }
+
+ if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) {
+ buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (edev->ops->common->selftest->selftest_memory(edev->cdev)) {
+ buf[QEDE_ETHTOOL_MEMORY_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (edev->ops->common->selftest->selftest_register(edev->cdev)) {
+ buf[QEDE_ETHTOOL_REGISTER_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (edev->ops->common->selftest->selftest_clock(edev->cdev)) {
+ buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+}
+
static const struct ethtool_ops qede_ethtool_ops = {
.get_settings = qede_get_settings,
.set_settings = qede_set_settings,
@@ -584,13 +1141,47 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_strings = qede_get_strings,
.set_phys_id = qede_set_phys_id,
.get_ethtool_stats = qede_get_ethtool_stats,
+ .get_priv_flags = qede_get_priv_flags,
.get_sset_count = qede_get_sset_count,
+ .get_rxnfc = qede_get_rxnfc,
+ .set_rxnfc = qede_set_rxnfc,
+ .get_rxfh_indir_size = qede_get_rxfh_indir_size,
+ .get_rxfh_key_size = qede_get_rxfh_key_size,
+ .get_rxfh = qede_get_rxfh,
+ .set_rxfh = qede_set_rxfh,
+ .get_channels = qede_get_channels,
+ .set_channels = qede_set_channels,
+ .self_test = qede_self_test,
+};
+
+static const struct ethtool_ops qede_vf_ethtool_ops = {
+ .get_settings = qede_get_settings,
+ .get_drvinfo = qede_get_drvinfo,
+ .get_msglevel = qede_get_msglevel,
+ .set_msglevel = qede_set_msglevel,
+ .get_link = qede_get_link,
+ .get_ringparam = qede_get_ringparam,
+ .set_ringparam = qede_set_ringparam,
+ .get_strings = qede_get_strings,
+ .get_ethtool_stats = qede_get_ethtool_stats,
+ .get_priv_flags = qede_get_priv_flags,
+ .get_sset_count = qede_get_sset_count,
+ .get_rxnfc = qede_get_rxnfc,
+ .set_rxnfc = qede_set_rxnfc,
+ .get_rxfh_indir_size = qede_get_rxfh_indir_size,
+ .get_rxfh_key_size = qede_get_rxfh_key_size,
+ .get_rxfh = qede_get_rxfh,
+ .set_rxfh = qede_set_rxfh,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
};
void qede_set_ethtool_ops(struct net_device *dev)
{
- dev->ethtool_ops = &qede_ethtool_ops;
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (IS_VF(edev))
+ dev->ethtool_ops = &qede_vf_ethtool_ops;
+ else
+ dev->ethtool_ops = &qede_ethtool_ops;
}
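For reference, the loopback frame that qede_selftest_run_loopback() builds and that qede_selftest_receive_traffic() verifies follows a fixed pattern: destination and source MAC both equal the device address, 0x77 filler for the rest of the Ethernet header, then each payload byte i carries (i & 0xff). A standalone userspace sketch of that pattern and the matching check; the MAC value and the 1514-byte length here are arbitrary placeholders, not taken from the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define ETH_HLEN 14

/* Build a frame the way the selftest does: dst == src == our MAC,
 * 0x77 filler up to ETH_HLEN, then a counting byte pattern.
 */
static void build_loopback_frame(uint8_t *pkt, size_t len, const uint8_t *mac)
{
	size_t i;

	memcpy(pkt, mac, ETH_ALEN);
	memcpy(pkt + ETH_ALEN, mac, ETH_ALEN);
	memset(pkt + 2 * ETH_ALEN, 0x77, ETH_HLEN - 2 * ETH_ALEN);
	for (i = ETH_HLEN; i < len; i++)
		pkt[i] = (uint8_t)(i & 0xff);
}

/* Mirror of the receive-side check: only bytes after the header are compared */
static int verify_loopback_frame(const uint8_t *pkt, size_t len)
{
	size_t i;

	for (i = ETH_HLEN; i < len; i++)
		if (pkt[i] != (uint8_t)(i & 0xff))
			return -1;
	return 0;
}

int main(void)
{
	static const uint8_t mac[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
	uint8_t frame[1514];

	build_loopback_frame(frame, sizeof(frame), mac);
	printf("pattern %s\n",
	       verify_loopback_frame(frame, sizeof(frame)) ? "mismatch" : "ok");
	return 0;
}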
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 12f6615..8114541 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -24,7 +24,12 @@
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
+#ifdef CONFIG_QEDE_VXLAN
#include <net/vxlan.h>
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+#include <net/geneve.h>
+#endif
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
@@ -58,6 +63,7 @@ static const struct qed_eth_ops *qed_ops;
#define CHIP_NUM_57980S_100 0x1644
#define CHIP_NUM_57980S_50 0x1654
#define CHIP_NUM_57980S_25 0x1656
+#define CHIP_NUM_57980S_IOV 0x1664
#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
@@ -66,15 +72,22 @@ static const struct qed_eth_ops *qed_ops;
#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
+#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
#endif
+enum qede_pci_private {
+ QEDE_PRIVATE_PF,
+ QEDE_PRIVATE_VF
+};
+
static const struct pci_device_id qede_pci_tbl[] = {
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
{ 0 }
};
@@ -89,17 +102,87 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
struct qede_rx_queue *rxq);
static void qede_link_update(void *dev, struct qed_link_output *link);
+#ifdef CONFIG_QED_SRIOV
+static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ if (vlan > 4095) {
+ DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
+ vlan, vf);
+
+ return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
+}
+
+static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ DP_VERBOSE(edev, QED_MSG_IOV,
+ "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
+
+ if (!is_valid_ether_addr(mac)) {
+ DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
+ return -EINVAL;
+ }
+
+ return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
+}
+
+static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
+{
+ struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
+ struct qed_dev_info *qed_info = &edev->dev_info.common;
+ int rc;
+
+ DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
+
+ rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
+
+ /* Enable/Disable Tx switching for PF */
+ if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
+ qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
+ struct qed_update_vport_params params;
+
+ memset(&params, 0, sizeof(params));
+ params.vport_id = 0;
+ params.update_tx_switching_flg = 1;
+ params.tx_switching_flg = num_vfs_param ? 1 : 0;
+ edev->ops->vport_update(edev->cdev, &params);
+ }
+
+ return rc;
+}
+#endif
+
static struct pci_driver qede_pci_driver = {
.name = "qede",
.id_table = qede_pci_tbl,
.probe = qede_probe,
.remove = qede_remove,
+#ifdef CONFIG_QED_SRIOV
+ .sriov_configure = qede_sriov_configure,
+#endif
};
+static void qede_force_mac(void *dev, u8 *mac)
+{
+ struct qede_dev *edev = dev;
+
+ ether_addr_copy(edev->ndev->dev_addr, mac);
+ ether_addr_copy(edev->primary_mac, mac);
+}
+
static struct qed_eth_cb_ops qede_ll_ops = {
{
.link_update = qede_link_update,
},
+ .force_mac = qede_force_mac,
};
static int qede_netdev_event(struct notifier_block *this, unsigned long event,
@@ -141,19 +224,10 @@ static
int __init qede_init(void)
{
int ret;
- u32 qed_ver;
pr_notice("qede_init: %s\n", version);
- qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
- if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
- pr_notice("Version mismatch [%08x != %08x]\n",
- qed_ver,
- QEDE_ETH_INTERFACE_VERSION);
- return -EINVAL;
- }
-
- qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
+ qed_ops = qed_get_eth_ops();
if (!qed_ops) {
pr_notice("Failed to get qed ethtool operations\n");
return -EINVAL;
@@ -319,6 +393,9 @@ static u32 qede_xmit_type(struct qede_dev *edev,
(ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
*ipv6_ext = 1;
+ if (skb->encapsulation)
+ rc |= XMIT_ENC;
+
if (skb_is_gso(skb))
rc |= XMIT_LSO;
@@ -380,6 +457,16 @@ static int map_frag_to_bd(struct qede_dev *edev,
return 0;
}
+static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
+{
+ if (is_encap_pkt)
+ return (skb_inner_transport_header(skb) +
+ inner_tcp_hdrlen(skb) - skb->data);
+ else
+ return (skb_transport_header(skb) +
+ tcp_hdrlen(skb) - skb->data);
+}
+
/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
@@ -390,8 +477,7 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
if (xmit_type & XMIT_LSO) {
int hlen;
- hlen = skb_transport_header(skb) +
- tcp_hdrlen(skb) - skb->data;
+ hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
/* linear payload would require its own BD */
if (skb_headlen(skb) > hlen)
@@ -499,7 +585,18 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
first_bd->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
- first_bd->data.bitfields |= cpu_to_le16(temp);
+ if (xmit_type & XMIT_ENC) {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ } else {
+			/* In cases where the OS doesn't indicate inner offloads
+			 * for a tunnelled packet, we need to override the HW
+			 * tunnel configuration so that the packet is treated as
+			 * a regular non-tunnelled packet and no inner offloads
+			 * are done by the hardware.
+ */
+ first_bd->data.bitfields |= cpu_to_le16(temp);
+ }
/* If the packet is IPv6 with extension header, indicate that
* to FW and pass few params, since the device cracker doesn't
@@ -515,10 +612,15 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
third_bd->data.lso_mss =
cpu_to_le16(skb_shinfo(skb)->gso_size);
- first_bd->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
- hlen = skb_transport_header(skb) +
- tcp_hdrlen(skb) - skb->data;
+ if (unlikely(xmit_type & XMIT_ENC)) {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+ hlen = qede_get_skb_hlen(skb, true);
+ } else {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ hlen = qede_get_skb_hlen(skb, false);
+ }
/* @@@TBD - if will not be removed need to check */
third_bd->data.bitfields |=
@@ -644,7 +746,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static int qede_txq_has_work(struct qede_tx_queue *txq)
+int qede_txq_has_work(struct qede_tx_queue *txq)
{
u16 hw_bd_cons;
@@ -727,7 +829,7 @@ static int qede_tx_int(struct qede_dev *edev,
return 0;
}
-static bool qede_has_rx_work(struct qede_rx_queue *rxq)
+bool qede_has_rx_work(struct qede_rx_queue *rxq)
{
u16 hw_comp_cons, sw_comp_cons;
@@ -782,8 +884,8 @@ static inline void qede_reuse_page(struct qede_dev *edev,
/* In case of allocation failures reuse buffers
* from consumer index to produce buffers for firmware
*/
-static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
- struct qede_dev *edev, u8 count)
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+ struct qede_dev *edev, u8 count)
{
struct sw_rx_data *curr_cons;
@@ -879,6 +981,9 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
if (csum_flag & QEDE_CSUM_UNNECESSARY)
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
+ skb->csum_level = 1;
}
static inline void qede_skb_receive(struct qede_dev *edev,
@@ -1188,13 +1293,47 @@ err:
tpa_info->skb = NULL;
}
-static u8 qede_check_csum(u16 flag)
+static bool qede_tunn_exist(u16 flag)
+{
+ return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
+}
+
+static u8 qede_check_tunn_csum(u16 flag)
+{
+ u16 csum_flag = 0;
+ u8 tcsum = 0;
+
+ if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+ if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+ csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+ tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+ }
+
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+ if (csum_flag & flag)
+ return QEDE_CSUM_ERROR;
+
+ return QEDE_CSUM_UNNECESSARY | tcsum;
+}
+
+static u8 qede_check_notunn_csum(u16 flag)
{
u16 csum_flag = 0;
u8 csum = 0;
- if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
- PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+ if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
csum = QEDE_CSUM_UNNECESSARY;
@@ -1209,6 +1348,14 @@ static u8 qede_check_csum(u16 flag)
return csum;
}
+static u8 qede_check_csum(u16 flag)
+{
+ if (!qede_tunn_exist(flag))
+ return qede_check_notunn_csum(flag);
+ else
+ return qede_check_tunn_csum(flag);
+}
+
static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
struct qede_dev *edev = fp->edev;
@@ -1569,16 +1716,25 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
- edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets;
- edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets;
- edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets;
- edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets;
- edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets;
- edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets;
- edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets;
- edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets;
- edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets;
- edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets;
+ edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
+ edev->stats.rx_128_to_255_byte_packets =
+ stats.rx_128_to_255_byte_packets;
+ edev->stats.rx_256_to_511_byte_packets =
+ stats.rx_256_to_511_byte_packets;
+ edev->stats.rx_512_to_1023_byte_packets =
+ stats.rx_512_to_1023_byte_packets;
+ edev->stats.rx_1024_to_1518_byte_packets =
+ stats.rx_1024_to_1518_byte_packets;
+ edev->stats.rx_1519_to_1522_byte_packets =
+ stats.rx_1519_to_1522_byte_packets;
+ edev->stats.rx_1519_to_2047_byte_packets =
+ stats.rx_1519_to_2047_byte_packets;
+ edev->stats.rx_2048_to_4095_byte_packets =
+ stats.rx_2048_to_4095_byte_packets;
+ edev->stats.rx_4096_to_9216_byte_packets =
+ stats.rx_4096_to_9216_byte_packets;
+ edev->stats.rx_9217_to_16383_byte_packets =
+ stats.rx_9217_to_16383_byte_packets;
edev->stats.rx_crc_errors = stats.rx_crc_errors;
edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
edev->stats.rx_pause_frames = stats.rx_pause_frames;
@@ -1652,6 +1808,49 @@ static struct rtnl_link_stats64 *qede_get_stats64(
return stats;
}
+#ifdef CONFIG_QED_SRIOV
+static int qede_get_vf_config(struct net_device *dev, int vfidx,
+ struct ifla_vf_info *ivi)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops)
+ return -EINVAL;
+
+ return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
+}
+
+static int qede_set_vf_rate(struct net_device *dev, int vfidx,
+ int min_tx_rate, int max_tx_rate)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate,
+ max_tx_rate);
+}
+
+static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops)
+ return -EINVAL;
+
+ return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
+}
+
+static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
+ int link_state)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops)
+ return -EINVAL;
+
+ return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
+}
+#endif
+
static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
struct qed_update_vport_params params;
@@ -1893,6 +2092,76 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
edev->accept_any_vlan = false;
}
+#ifdef CONFIG_QEDE_VXLAN
+static void qede_add_vxlan_port(struct net_device *dev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 t_port = ntohs(port);
+
+ if (edev->vxlan_dst_port)
+ return;
+
+ edev->vxlan_dst_port = t_port;
+
+	DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n", t_port);
+
+ set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_del_vxlan_port(struct net_device *dev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 t_port = ntohs(port);
+
+ if (t_port != edev->vxlan_dst_port)
+ return;
+
+ edev->vxlan_dst_port = 0;
+
+	DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n", t_port);
+
+ set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+#endif
+
+#ifdef CONFIG_QEDE_GENEVE
+static void qede_add_geneve_port(struct net_device *dev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 t_port = ntohs(port);
+
+ if (edev->geneve_dst_port)
+ return;
+
+ edev->geneve_dst_port = t_port;
+
+	DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n", t_port);
+ set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_del_geneve_port(struct net_device *dev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 t_port = ntohs(port);
+
+ if (t_port != edev->geneve_dst_port)
+ return;
+
+ edev->geneve_dst_port = 0;
+
+	DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n", t_port);
+ set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+#endif
+
static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
@@ -1901,9 +2170,27 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
+#ifdef CONFIG_QED_SRIOV
+ .ndo_set_vf_mac = qede_set_vf_mac,
+ .ndo_set_vf_vlan = qede_set_vf_vlan,
+#endif
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
.ndo_get_stats64 = qede_get_stats64,
+#ifdef CONFIG_QED_SRIOV
+ .ndo_set_vf_link_state = qede_set_vf_link_state,
+ .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
+ .ndo_get_vf_config = qede_get_vf_config,
+ .ndo_set_vf_rate = qede_set_vf_rate,
+#endif
+#ifdef CONFIG_QEDE_VXLAN
+ .ndo_add_vxlan_port = qede_add_vxlan_port,
+ .ndo_del_vxlan_port = qede_del_vxlan_port,
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+ .ndo_add_geneve_port = qede_add_geneve_port,
+ .ndo_del_geneve_port = qede_del_geneve_port,
+#endif
};
/* -------------------------------------------------------------------------
@@ -1974,6 +2261,14 @@ static void qede_init_ndev(struct qede_dev *edev)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
+ /* Encap features*/
+ hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_TSO_ECN;
+ ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
+
ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
NETIF_F_HIGHDMA;
ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
@@ -2074,6 +2369,8 @@ static void qede_sp_task(struct work_struct *work)
{
struct qede_dev *edev = container_of(work, struct qede_dev,
sp_task.work);
+ struct qed_dev *cdev = edev->cdev;
+
mutex_lock(&edev->qede_lock);
if (edev->state == QEDE_STATE_OPEN) {
@@ -2081,6 +2378,24 @@ static void qede_sp_task(struct work_struct *work)
qede_config_rx_mode(edev->ndev);
}
+ if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
+ struct qed_tunn_params tunn_params;
+
+ memset(&tunn_params, 0, sizeof(tunn_params));
+ tunn_params.update_vxlan_port = 1;
+ tunn_params.vxlan_port = edev->vxlan_dst_port;
+ qed_ops->tunn_config(cdev, &tunn_params);
+ }
+
+ if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
+ struct qed_tunn_params tunn_params;
+
+ memset(&tunn_params, 0, sizeof(tunn_params));
+ tunn_params.update_geneve_port = 1;
+ tunn_params.geneve_port = edev->geneve_dst_port;
+ qed_ops->tunn_config(cdev, &tunn_params);
+ }
+
mutex_unlock(&edev->qede_lock);
}
@@ -2099,8 +2414,9 @@ enum qede_probe_mode {
};
static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
- enum qede_probe_mode mode)
+ bool is_vf, enum qede_probe_mode mode)
{
+ struct qed_probe_params probe_params;
struct qed_slowpath_params params;
struct qed_dev_eth_info dev_info;
struct qede_dev *edev;
@@ -2110,8 +2426,12 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
if (unlikely(dp_level & QED_LEVEL_INFO))
pr_notice("Starting qede probe\n");
- cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
- dp_module, dp_level);
+ memset(&probe_params, 0, sizeof(probe_params));
+ probe_params.protocol = QED_PROTOCOL_ETH;
+ probe_params.dp_module = dp_module;
+ probe_params.dp_level = dp_level;
+ probe_params.is_vf = is_vf;
+ cdev = qed_ops->common->probe(pdev, &probe_params);
if (!cdev) {
rc = -ENODEV;
goto err0;
@@ -2145,6 +2465,9 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
goto err2;
}
+ if (is_vf)
+ edev->flags |= QEDE_FLAG_IS_VF;
+
qede_init_ndev(edev);
rc = register_netdev(edev->ndev);
@@ -2176,12 +2499,24 @@ err0:
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ bool is_vf = false;
u32 dp_module = 0;
u8 dp_level = 0;
+ switch ((enum qede_pci_private)id->driver_data) {
+ case QEDE_PRIVATE_VF:
+ if (debug & QED_LOG_VERBOSE_MASK)
+ dev_err(&pdev->dev, "Probing a VF\n");
+ is_vf = true;
+ break;
+ default:
+ if (debug & QED_LOG_VERBOSE_MASK)
+ dev_err(&pdev->dev, "Probing a PF\n");
+ }
+
qede_config_debug(debug, &dp_module, &dp_level);
- return __qede_probe(pdev, dp_module, dp_level,
+ return __qede_probe(pdev, dp_module, dp_level, is_vf,
QEDE_PROBE_NORMAL);
}
@@ -2876,10 +3211,11 @@ static int qede_start_queues(struct qede_dev *edev)
int rc, tc, i;
int vlan_removal_en = 1;
struct qed_dev *cdev = edev->cdev;
- struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
struct qed_update_vport_params vport_update_params;
struct qed_queue_start_common_params q_params;
+ struct qed_dev_info *qed_info = &edev->dev_info.common;
struct qed_start_vport_params start = {0};
+ bool reset_rss_indir = false;
if (!edev->num_rss) {
DP_ERR(edev,
@@ -2971,19 +3307,59 @@ static int qede_start_queues(struct qede_dev *edev)
vport_update_params.update_vport_active_flg = 1;
vport_update_params.vport_active_flg = 1;
+ if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
+ qed_info->tx_switching) {
+ vport_update_params.update_tx_switching_flg = 1;
+ vport_update_params.tx_switching_flg = 1;
+ }
+
/* Fill struct with RSS params */
if (QEDE_RSS_CNT(edev) > 1) {
vport_update_params.update_rss_flg = 1;
- for (i = 0; i < 128; i++)
- rss_params->rss_ind_table[i] =
- ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
- netdev_rss_key_fill(rss_params->rss_key,
- sizeof(rss_params->rss_key));
+
+ /* Need to validate current RSS config uses valid entries */
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ if (edev->rss_params.rss_ind_table[i] >=
+ edev->num_rss) {
+ reset_rss_indir = true;
+ break;
+ }
+ }
+
+ if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
+ reset_rss_indir) {
+ u16 val;
+
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ u16 indir_val;
+
+ val = QEDE_RSS_CNT(edev);
+ indir_val = ethtool_rxfh_indir_default(i, val);
+ edev->rss_params.rss_ind_table[i] = indir_val;
+ }
+ edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+ }
+
+ if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
+ netdev_rss_key_fill(edev->rss_params.rss_key,
+ sizeof(edev->rss_params.rss_key));
+ edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+ }
+
+ if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
+ edev->rss_params.rss_caps = QED_RSS_IPV4 |
+ QED_RSS_IPV6 |
+ QED_RSS_IPV4_TCP |
+ QED_RSS_IPV6_TCP;
+ edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+ }
+
+ memcpy(&vport_update_params.rss_params, &edev->rss_params,
+ sizeof(vport_update_params.rss_params));
} else {
- memset(rss_params, 0, sizeof(*rss_params));
+ memset(&vport_update_params.rss_params, 0,
+ sizeof(vport_update_params.rss_params));
}
- memcpy(&vport_update_params.rss_params, rss_params,
- sizeof(*rss_params));
rc = edev->ops->vport_update(cdev, &vport_update_params);
if (rc) {
@@ -3165,12 +3541,24 @@ void qede_reload(struct qede_dev *edev,
static int qede_open(struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
+ int rc;
netif_carrier_off(ndev);
edev->ops->common->set_power_state(edev->cdev, PCI_D0);
- return qede_load(edev, QEDE_LOAD_NORMAL);
+ rc = qede_load(edev, QEDE_LOAD_NORMAL);
+
+ if (rc)
+ return rc;
+
+#ifdef CONFIG_QEDE_VXLAN
+ vxlan_get_rx_port(ndev);
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+ geneve_get_rx_port(ndev);
+#endif
+ return 0;
}
static int qede_close(struct net_device *ndev)
@@ -3221,6 +3609,11 @@ static int qede_set_mac_addr(struct net_device *ndev, void *p)
return -EFAULT;
}
+ if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
+ DP_NOTICE(edev, "qed prevents setting MAC\n");
+ return -EINVAL;
+ }
+
ether_addr_copy(ndev->dev_addr, addr->sa_data);
if (!netif_running(ndev)) {
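The RSS indirection-table handling added to qede_start_queues() above keeps a user-provided table only while every entry still points at an existing Rx queue, and otherwise rebuilds it with the spread ethtool_rxfh_indir_default() produces (index modulo queue count). A standalone sketch of that defaulting/validation step; the 128-entry size is assumed to match QED_RSS_IND_TABLE_SIZE and the queue counts are made up for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IND_TABLE_SIZE 128	/* assumed to equal QED_RSS_IND_TABLE_SIZE */

/* Same spread as ethtool_rxfh_indir_default(): round-robin over the queues */
static uint16_t indir_default(unsigned int index, unsigned int num_queues)
{
	return index % num_queues;
}

/* Mirrors the check in qede_start_queues(): keep a configured table only if
 * every entry still addresses an existing Rx queue, otherwise rebuild it.
 */
static void fixup_indir_table(uint16_t *table, bool user_configured,
			      unsigned int num_queues)
{
	bool reset = !user_configured;
	unsigned int i;

	for (i = 0; i < IND_TABLE_SIZE && !reset; i++)
		if (table[i] >= num_queues)
			reset = true;

	if (!reset)
		return;

	for (i = 0; i < IND_TABLE_SIZE; i++)
		table[i] = indir_default(i, num_queues);
}

int main(void)
{
	uint16_t table[IND_TABLE_SIZE] = { [0] = 7 };

	/* Table was configured while 8 queues existed; only 4 remain now,
	 * so entry 0 is out of range and the whole table gets re-spread.
	 */
	fixup_indir_table(table, true, 4);
	printf("table[0..3] = %u %u %u %u\n",
	       table[0], table[1], table[2], table[3]);
	return 0;
}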
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 1205f6f..1c29105 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -3952,8 +3952,14 @@ static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
{
- return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
- PCI_ERS_RESULT_RECOVERED;
+ pci_ers_result_t res;
+
+ rtnl_lock();
+ res = qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
+ PCI_ERS_RESULT_RECOVERED;
+ rtnl_unlock();
+
+ return res;
}
static void qlcnic_82xx_io_resume(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index b28e73e..83d7210 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4687,7 +4687,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
/*
* Set up the operating parameters.
*/
- qdev->workqueue = create_singlethread_workqueue(ndev->name);
+ qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM);
INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);