Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/en_netdev.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c  |  152
1 file changed, 58 insertions(+), 94 deletions(-)
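
Most of the 152 changed lines come from switching the rx/tx ring and CQ members back from arrays of pointers (where priv->rx_ring[i] is itself a pointer) to flat arrays of structs (where call sites take &priv->rx_ring[i]), plus dropping UDP support from the RFS filter path so only TCP flows are steered. The following is a minimal, self-contained C sketch of the two array layouts only; the struct name, fields, and sizes are simplified stand-ins for illustration, not the driver's real definitions.

#include <stdlib.h>

/* Hypothetical, simplified stand-in for struct mlx4_en_rx_ring. */
struct rx_ring {
	int cqn;
	unsigned long packets;
	unsigned long bytes;
};

#define NUM_RX_RINGS 8

int main(void)
{
	/* Layout being removed by this diff: an array of pointers, each
	 * ring allocated on its own, so callers write "priv->rx_ring[i]". */
	struct rx_ring **ring_ptrs = calloc(NUM_RX_RINGS, sizeof(*ring_ptrs));
	ring_ptrs[0] = calloc(1, sizeof(**ring_ptrs));
	ring_ptrs[0]->cqn = 7;

	/* Layout this diff restores: one flat array of ring structs in a
	 * single allocation, so callers write "&priv->rx_ring[i]". */
	struct rx_ring *rings = calloc(NUM_RX_RINGS, sizeof(*rings));
	rings[0].cqn = 7;

	free(ring_ptrs[0]);
	free(ring_ptrs);
	free(rings);
	return 0;
}
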
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index e72d8a1..fa37b7a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -75,7 +75,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi)
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
+ struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
int done;
if (!priv->port_up)
@@ -102,7 +102,6 @@ struct mlx4_en_filter {
struct list_head next;
struct work_struct work;
- u8 ip_proto;
__be32 src_ip;
__be32 dst_ip;
__be16 src_port;
@@ -121,26 +120,14 @@ struct mlx4_en_filter {
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
-static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
-{
- switch (ip_proto) {
- case IPPROTO_UDP:
- return MLX4_NET_TRANS_RULE_ID_UDP;
- case IPPROTO_TCP:
- return MLX4_NET_TRANS_RULE_ID_TCP;
- default:
- return -EPROTONOSUPPORT;
- }
-};
-
static void mlx4_en_filter_work(struct work_struct *work)
{
struct mlx4_en_filter *filter = container_of(work,
struct mlx4_en_filter,
work);
struct mlx4_en_priv *priv = filter->priv;
- struct mlx4_spec_list spec_tcp_udp = {
- .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
+ struct mlx4_spec_list spec_tcp = {
+ .id = MLX4_NET_TRANS_RULE_ID_TCP,
{
.tcp_udp = {
.dst_port = filter->dst_port,
@@ -176,14 +163,9 @@ static void mlx4_en_filter_work(struct work_struct *work)
int rc;
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
- if (spec_tcp_udp.id < 0) {
- en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
- filter->ip_proto);
- goto ignore;
- }
list_add_tail(&spec_eth.list, &rule.list);
list_add_tail(&spec_ip.list, &rule.list);
- list_add_tail(&spec_tcp_udp.list, &rule.list);
+ list_add_tail(&spec_tcp.list, &rule.list);
rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
@@ -201,7 +183,6 @@ static void mlx4_en_filter_work(struct work_struct *work)
if (rc)
en_err(priv, "Error attaching flow. err = %d\n", rc);
-ignore:
mlx4_en_filter_rfs_expire(priv);
filter->activated = 1;
@@ -225,8 +206,8 @@ filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
- __be32 dst_ip, u8 ip_proto, __be16 src_port,
- __be16 dst_port, u32 flow_id)
+ __be32 dst_ip, __be16 src_port, __be16 dst_port,
+ u32 flow_id)
{
struct mlx4_en_filter *filter = NULL;
@@ -240,7 +221,6 @@ mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
filter->src_ip = src_ip;
filter->dst_ip = dst_ip;
- filter->ip_proto = ip_proto;
filter->src_port = src_port;
filter->dst_port = dst_port;
@@ -272,7 +252,7 @@ static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
- u8 ip_proto, __be16 src_port, __be16 dst_port)
+ __be16 src_port, __be16 dst_port)
{
struct mlx4_en_filter *filter;
struct mlx4_en_filter *ret = NULL;
@@ -283,7 +263,6 @@ mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
filter_chain) {
if (filter->src_ip == src_ip &&
filter->dst_ip == dst_ip &&
- filter->ip_proto == ip_proto &&
filter->src_port == src_port &&
filter->dst_port == dst_port) {
ret = filter;
@@ -302,7 +281,6 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
struct mlx4_en_filter *filter;
const struct iphdr *ip;
const __be16 *ports;
- u8 ip_proto;
__be32 src_ip;
__be32 dst_ip;
__be16 src_port;
@@ -317,19 +295,18 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
if (ip_is_fragment(ip))
return -EPROTONOSUPPORT;
- if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
- return -EPROTONOSUPPORT;
ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
- ip_proto = ip->protocol;
src_ip = ip->saddr;
dst_ip = ip->daddr;
src_port = ports[0];
dst_port = ports[1];
+ if (ip->protocol != IPPROTO_TCP)
+ return -EPROTONOSUPPORT;
+
spin_lock_bh(&priv->filters_lock);
- filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
- src_port, dst_port);
+ filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
if (filter) {
if (filter->rxq_index == rxq_index)
goto out;
@@ -337,7 +314,7 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
filter->rxq_index = rxq_index;
} else {
filter = mlx4_en_filter_alloc(priv, rxq_index,
- src_ip, dst_ip, ip_proto,
+ src_ip, dst_ip,
src_port, dst_port, flow_id);
if (!filter) {
ret = -ENOMEM;
@@ -355,7 +332,8 @@ err:
return ret;
}
-void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *rx_ring)
{
struct mlx4_en_filter *filter, *tmp;
LIST_HEAD(del_list);
@@ -439,6 +417,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err;
+ int idx;
en_dbg(HW, priv, "Killing VID:%d\n", vid);
@@ -446,7 +425,10 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
/* Remove VID from port VLAN filter */
mutex_lock(&mdev->state_lock);
- mlx4_unregister_vlan(mdev->dev, priv->port, vid);
+ if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
+ mlx4_unregister_vlan(mdev->dev, priv->port, idx);
+ else
+ en_dbg(HW, priv, "could not find vid %d in cache\n", vid);
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
@@ -1241,7 +1223,7 @@ static void mlx4_en_netpoll(struct net_device *dev)
int i;
for (i = 0; i < priv->rx_ring_num; i++) {
- cq = priv->rx_cq[i];
+ cq = &priv->rx_cq[i];
spin_lock_irqsave(&cq->lock, flags);
napi_synchronize(&cq->napi);
mlx4_en_process_rx_cq(dev, cq, 0);
@@ -1263,8 +1245,8 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
continue;
en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
- i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
- priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
+ i, priv->tx_ring[i].qpn, priv->tx_ring[i].cqn,
+ priv->tx_ring[i].cons, priv->tx_ring[i].prod);
}
priv->port_stats.tx_timeout++;
@@ -1304,7 +1286,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
/* Setup cq moderation params */
for (i = 0; i < priv->rx_ring_num; i++) {
- cq = priv->rx_cq[i];
+ cq = &priv->rx_cq[i];
cq->moder_cnt = priv->rx_frames;
cq->moder_time = priv->rx_usecs;
priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
@@ -1313,7 +1295,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
}
for (i = 0; i < priv->tx_ring_num; i++) {
- cq = priv->tx_cq[i];
+ cq = &priv->tx_cq[i];
cq->moder_cnt = priv->tx_frames;
cq->moder_time = priv->tx_usecs;
}
@@ -1347,8 +1329,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
for (ring = 0; ring < priv->rx_ring_num; ring++) {
spin_lock_bh(&priv->stats_lock);
- rx_packets = priv->rx_ring[ring]->packets;
- rx_bytes = priv->rx_ring[ring]->bytes;
+ rx_packets = priv->rx_ring[ring].packets;
+ rx_bytes = priv->rx_ring[ring].bytes;
spin_unlock_bh(&priv->stats_lock);
rx_pkt_diff = ((unsigned long) (rx_packets -
@@ -1377,7 +1359,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
if (moder_time != priv->last_moder_time[ring]) {
priv->last_moder_time[ring] = moder_time;
- cq = priv->rx_cq[ring];
+ cq = &priv->rx_cq[ring];
cq->moder_time = moder_time;
cq->moder_cnt = priv->rx_frames;
err = mlx4_en_set_cq_moder(priv, cq);
@@ -1500,7 +1482,7 @@ int mlx4_en_start_port(struct net_device *dev)
return err;
}
for (i = 0; i < priv->rx_ring_num; i++) {
- cq = priv->rx_cq[i];
+ cq = &priv->rx_cq[i];
mlx4_en_cq_init_lock(cq);
@@ -1518,7 +1500,7 @@ int mlx4_en_start_port(struct net_device *dev)
goto cq_err;
}
mlx4_en_arm_cq(priv, cq);
- priv->rx_ring[i]->cqn = cq->mcq.cqn;
+ priv->rx_ring[i].cqn = cq->mcq.cqn;
++rx_index;
}
@@ -1544,7 +1526,7 @@ int mlx4_en_start_port(struct net_device *dev)
/* Configure tx cq's and rings */
for (i = 0; i < priv->tx_ring_num; i++) {
/* Configure cq */
- cq = priv->tx_cq[i];
+ cq = &priv->tx_cq[i];
err = mlx4_en_activate_cq(priv, cq, i);
if (err) {
en_err(priv, "Failed allocating Tx CQ\n");
@@ -1560,7 +1542,7 @@ int mlx4_en_start_port(struct net_device *dev)
cq->buf->wqe_index = cpu_to_be16(0xffff);
/* Configure ring */
- tx_ring = priv->tx_ring[i];
+ tx_ring = &priv->tx_ring[i];
err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
i / priv->num_tx_rings_p_up);
if (err) {
@@ -1630,8 +1612,8 @@ int mlx4_en_start_port(struct net_device *dev)
tx_err:
while (tx_index--) {
- mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
- mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
+ mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
+ mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
}
mlx4_en_destroy_drop_qp(priv);
rss_err:
@@ -1640,9 +1622,9 @@ mac_err:
mlx4_en_put_qp(priv);
cq_err:
while (rx_index--)
- mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
+ mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
for (i = 0; i < priv->rx_ring_num; i++)
- mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
+ mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
return err; /* need to close devices */
}
@@ -1738,25 +1720,25 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
/* Free TX Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
- mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
- mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
+ mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
+ mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
}
msleep(10);
for (i = 0; i < priv->tx_ring_num; i++)
- mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
+ mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
/* Free RSS qps */
mlx4_en_release_rss_steer(priv);
/* Unregister Mac address for the port */
mlx4_en_put_qp(priv);
- if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
+ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
mdev->mac_removed[priv->port] = 1;
/* Free RX Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
- struct mlx4_en_cq *cq = priv->rx_cq[i];
+ struct mlx4_en_cq *cq = &priv->rx_cq[i];
local_bh_disable();
while (!mlx4_en_cq_lock_napi(cq)) {
@@ -1767,7 +1749,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
msleep(1);
- mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
+ mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
mlx4_en_deactivate_cq(priv, cq);
}
}
@@ -1805,15 +1787,15 @@ static void mlx4_en_clear_stats(struct net_device *dev)
memset(&priv->port_stats, 0, sizeof(priv->port_stats));
for (i = 0; i < priv->tx_ring_num; i++) {
- priv->tx_ring[i]->bytes = 0;
- priv->tx_ring[i]->packets = 0;
- priv->tx_ring[i]->tx_csum = 0;
+ priv->tx_ring[i].bytes = 0;
+ priv->tx_ring[i].packets = 0;
+ priv->tx_ring[i].tx_csum = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
- priv->rx_ring[i]->bytes = 0;
- priv->rx_ring[i]->packets = 0;
- priv->rx_ring[i]->csum_ok = 0;
- priv->rx_ring[i]->csum_none = 0;
+ priv->rx_ring[i].bytes = 0;
+ priv->rx_ring[i].packets = 0;
+ priv->rx_ring[i].csum_ok = 0;
+ priv->rx_ring[i].csum_none = 0;
}
}
@@ -1870,17 +1852,17 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
#endif
for (i = 0; i < priv->tx_ring_num; i++) {
- if (priv->tx_ring && priv->tx_ring[i])
+ if (priv->tx_ring[i].tx_info)
mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
- if (priv->tx_cq && priv->tx_cq[i])
+ if (priv->tx_cq[i].buf)
mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
}
for (i = 0; i < priv->rx_ring_num; i++) {
- if (priv->rx_ring[i])
+ if (priv->rx_ring[i].rx_info)
mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
priv->prof->rx_ring_size, priv->stride);
- if (priv->rx_cq[i])
+ if (priv->rx_cq[i].buf)
mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
}
@@ -1895,7 +1877,6 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
struct mlx4_en_port_profile *prof = priv->prof;
int i;
int err;
- int node;
err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
if (err) {
@@ -1905,26 +1886,23 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
/* Create tx Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
- node = cpu_to_node(i % num_online_cpus());
if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
- prof->tx_ring_size, i, TX, node))
+ prof->tx_ring_size, i, TX))
goto err;
if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
- prof->tx_ring_size, TXBB_SIZE, node))
+ prof->tx_ring_size, TXBB_SIZE))
goto err;
}
/* Create rx Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
- node = cpu_to_node(i % num_online_cpus());
if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
- prof->rx_ring_size, i, RX, node))
+ prof->rx_ring_size, i, RX))
goto err;
if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
- prof->rx_ring_size, priv->stride,
- node))
+ prof->rx_ring_size, priv->stride))
goto err;
}
@@ -1940,20 +1918,6 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
err:
en_err(priv, "Failed to allocate NIC resources\n");
- for (i = 0; i < priv->rx_ring_num; i++) {
- if (priv->rx_ring[i])
- mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
- prof->rx_ring_size,
- priv->stride);
- if (priv->rx_cq[i])
- mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
- }
- for (i = 0; i < priv->tx_ring_num; i++) {
- if (priv->tx_ring[i])
- mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
- if (priv->tx_cq[i])
- mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
- }
return -ENOMEM;
}
@@ -2247,13 +2211,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
priv->tx_ring_num = prof->tx_ring_num;
- priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
+ priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
GFP_KERNEL);
if (!priv->tx_ring) {
err = -ENOMEM;
goto out;
}
- priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
+ priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
GFP_KERNEL);
if (!priv->tx_cq) {
err = -ENOMEM;