Diffstat (limited to 'drivers/net/cxgb3/sge.c')
-rw-r--r--  drivers/net/cxgb3/sge.c | 105
1 file changed, 87 insertions(+), 18 deletions(-)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index c6480be..6c641a8 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -36,6 +36,7 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
+#include <net/arp.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
@@ -549,16 +550,15 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
if (!p)
return NULL;
- if (sw_size) {
+ if (sw_size && metadata) {
s = kcalloc(nelem, sw_size, GFP_KERNEL);
if (!s) {
dma_free_coherent(&pdev->dev, len, p, *phys);
return NULL;
}
- }
- if (metadata)
*(void **)metadata = s;
+ }
memset(p, 0, len);
return p;
}
@@ -1121,10 +1121,10 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
htonl(V_WR_TID(q->token)));
}
-static inline void t3_stop_queue(struct net_device *dev, struct sge_qset *qs,
- struct sge_txq *q)
+static inline void t3_stop_tx_queue(struct netdev_queue *txq,
+ struct sge_qset *qs, struct sge_txq *q)
{
- netif_stop_queue(dev);
+ netif_tx_stop_queue(txq);
set_bit(TXQ_ETH, &qs->txq_stopped);
q->stops++;
}
@@ -1138,11 +1138,13 @@ static inline void t3_stop_queue(struct net_device *dev, struct sge_qset *qs,
*/
int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ int qidx;
unsigned int ndesc, pidx, credits, gen, compl;
const struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
- struct sge_qset *qs = pi->qs;
- struct sge_txq *q = &qs->txq[TXQ_ETH];
+ struct netdev_queue *txq;
+ struct sge_qset *qs;
+ struct sge_txq *q;
/*
* The chip min packet length is 9 octets but play safe and reject
@@ -1153,6 +1155,11 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+ qidx = skb_get_queue_mapping(skb);
+ qs = &pi->qs[qidx];
+ q = &qs->txq[TXQ_ETH];
+ txq = netdev_get_tx_queue(dev, qidx);
+
spin_lock(&q->lock);
reclaim_completed_tx(adap, q);
@@ -1160,7 +1167,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
ndesc = calc_tx_descs(skb);
if (unlikely(credits < ndesc)) {
- t3_stop_queue(dev, qs, q);
+ t3_stop_tx_queue(txq, qs, q);
dev_err(&adap->pdev->dev,
"%s: Tx ring %u full while queue awake!\n",
dev->name, q->cntxt_id & 7);
@@ -1170,12 +1177,12 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
q->in_use += ndesc;
if (unlikely(credits - ndesc < q->stop_thres)) {
- t3_stop_queue(dev, qs, q);
+ t3_stop_tx_queue(txq, qs, q);
if (should_restart_tx(q) &&
test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
q->restarts++;
- netif_wake_queue(dev);
+ netif_tx_wake_queue(txq);
}
}
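Reviewer note: the qidx/txq handling above assumes the net device exposes one TX queue per SGE queue set. A minimal sketch of that allocation-side assumption, not taken from this diff (example_port_priv and example_alloc_port are illustrative stand-ins; the real setup lives in cxgb3_main.c):
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
/* Stand-in for the driver's per-port private data (struct port_info in
 * cxgb3); defined locally only so this sketch is self-contained. */
struct example_port_priv {
	int placeholder;
};
/* Allocate the net_device with one TX queue per queue set, so the
 * skb_get_queue_mapping()/netdev_get_tx_queue() pair in t3_eth_xmit()
 * resolves to a distinct struct netdev_queue for each SGE TX ring. */
static struct net_device *example_alloc_port(unsigned int nqsets)
{
	return alloc_etherdev_mq(sizeof(struct example_port_priv), nqsets);
}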
@@ -1839,7 +1846,7 @@ static void restart_tx(struct sge_qset *qs)
test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
qs->txq[TXQ_ETH].restarts++;
if (netif_running(qs->netdev))
- netif_wake_queue(qs->netdev);
+ netif_tx_wake_queue(qs->tx_q);
}
if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
@@ -1857,6 +1864,54 @@ static void restart_tx(struct sge_qset *qs)
}
/**
+ * cxgb3_arp_process - process an ARP request probing a private IP address
+ * @adapter: the adapter
+ * @skb: the skbuff containing the ARP request
+ *
+ * Check if the ARP request is probing the private IP address
+ * dedicated to iSCSI, generate an ARP reply if so.
+ */
+static void cxgb3_arp_process(struct adapter *adapter, struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ struct port_info *pi;
+ struct arphdr *arp;
+ unsigned char *arp_ptr;
+ unsigned char *sha;
+ __be32 sip, tip;
+
+ if (!dev)
+ return;
+
+ skb_reset_network_header(skb);
+ arp = arp_hdr(skb);
+
+ if (arp->ar_op != htons(ARPOP_REQUEST))
+ return;
+
+ arp_ptr = (unsigned char *)(arp + 1);
+ sha = arp_ptr;
+ arp_ptr += dev->addr_len;
+ memcpy(&sip, arp_ptr, sizeof(sip));
+ arp_ptr += sizeof(sip);
+ arp_ptr += dev->addr_len;
+ memcpy(&tip, arp_ptr, sizeof(tip));
+
+ pi = netdev_priv(dev);
+ if (tip != pi->iscsi_ipv4addr)
+ return;
+
+ arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
+ dev->dev_addr, sha);
+
+}
+
+static inline int is_arp(struct sk_buff *skb)
+{
+ return skb->protocol == htons(ETH_P_ARP);
+}
+
+/**
* rx_eth - process an ingress ethernet packet
* @adap: the adapter
* @rq: the response queue that received the packet
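Reviewer note: the arp_ptr arithmetic in cxgb3_arp_process() above walks the standard Ethernet/IPv4 ARP payload that follows struct arphdr. A hedged illustration of that layout (the struct name and field names are descriptive only, not from the driver):
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/types.h>
/* On-wire layout following struct arphdr for an Ethernet/IPv4 ARP
 * packet; sip/tip are not 4-byte aligned, which is why the driver
 * copies them out with memcpy() instead of dereferencing a struct. */
struct example_arp_eth_ipv4 {
	unsigned char	sha[ETH_ALEN];	/* sender hardware address */
	__be32		sip;		/* sender IP address        */
	unsigned char	tha[ETH_ALEN];	/* target hardware address  */
	__be32		tip;		/* target IP address        */
} __attribute__((packed));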
@@ -1876,11 +1931,10 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
skb_pull(skb, sizeof(*p) + pad);
skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
- skb->dev->last_rx = jiffies;
pi = netdev_priv(skb->dev);
if (pi->rx_csum_offload && p->csum_valid && p->csum == htons(0xffff) &&
!p->fragment) {
- rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+ qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
skb->ip_summed = CHECKSUM_NONE;
@@ -1895,16 +1949,28 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
grp,
ntohs(p->vlan),
p);
- else
+ else {
+ if (unlikely(pi->iscsi_ipv4addr &&
+ is_arp(skb))) {
+ unsigned short vtag = ntohs(p->vlan) &
+ VLAN_VID_MASK;
+ skb->dev = vlan_group_get_device(grp,
+ vtag);
+ cxgb3_arp_process(adap, skb);
+ }
__vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
rq->polling);
+ }
else
dev_kfree_skb_any(skb);
} else if (rq->polling) {
if (lro)
lro_receive_skb(&qs->lro_mgr, skb, p);
- else
+ else {
+ if (unlikely(pi->iscsi_ipv4addr && is_arp(skb)))
+ cxgb3_arp_process(adap, skb);
netif_receive_skb(skb);
+ }
} else
netif_rx(skb);
}
@@ -2308,7 +2374,7 @@ next_fl:
static inline int is_pure_response(const struct rsp_desc *r)
{
- u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
+ __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
return (n | r->len_cq) == 0;
}
@@ -2826,6 +2892,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
* @p: configuration parameters for this queue set
* @ntxq: number of Tx queues for the queue set
* @netdev: net device associated with this queue set
+ * @netdevq: net device TX queue associated with this queue set
*
* Allocate resources and initialize an SGE queue set. A queue set
* comprises a response queue, two Rx free-buffer queues, and up to 3
@@ -2834,7 +2901,8 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
*/
int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
int irq_vec_idx, const struct qset_params *p,
- int ntxq, struct net_device *dev)
+ int ntxq, struct net_device *dev,
+ struct netdev_queue *netdevq)
{
int i, avail, ret = -ENOMEM;
struct sge_qset *q = &adapter->sge.qs[id];
@@ -2970,6 +3038,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
q->adap = adapter;
q->netdev = dev;
+ q->tx_q = netdevq;
t3_update_qset_coalesce(q, p);
init_lro_mgr(q, lro_mgr);
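Reviewer note: the new netdevq argument to t3_sge_alloc_qset() is how each queue set gets bound to its struct netdev_queue (stored in qs->tx_q for restart_tx()). A minimal caller-side sketch; the function name and parameter values are illustrative, and the real call sites in cxgb3_main.c are not part of this diff:
/* Bind queue set 'id' of 'dev' to TX queue 'qidx' so that
 * t3_eth_xmit() and restart_tx() stop/wake exactly that queue. */
static int example_setup_qset(struct adapter *adapter, struct net_device *dev,
			      unsigned int id, int qidx, int irq_vec_idx,
			      const struct qset_params *p, int ntxq)
{
	return t3_sge_alloc_qset(adapter, id, 1 /* nports */, irq_vec_idx,
				 p, ntxq, dev,
				 netdev_get_tx_queue(dev, qidx));
}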