author    Scott Wood <scottwood@freescale.com>  2014-04-07 23:49:35 (GMT)
committer Scott Wood <scottwood@freescale.com>  2014-04-07 23:49:35 (GMT)
commit    62b8c978ee6b8d135d9e7953221de58000dba986 (patch)
tree      683b04b2e627f6710c22c151b23c8cc9a165315e /drivers/staging/octeon
parent    78fd82238d0e5716578c326404184a27ba67fd6e (diff)
download  linux-fsl-qoriq-62b8c978ee6b8d135d9e7953221de58000dba986.tar.xz
Rewind v3.13-rc3+ (78fd82238d0e5716) to v3.12
Diffstat (limited to 'drivers/staging/octeon')
-rw-r--r--  drivers/staging/octeon/ethernet-rx.c   | 15
-rw-r--r--  drivers/staging/octeon/ethernet-spi.c  | 92
-rw-r--r--  drivers/staging/octeon/ethernet-tx.c   | 71
-rw-r--r--  drivers/staging/octeon/ethernet.c      | 24
4 files changed, 103 insertions, 99 deletions
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 0315f60..e14a1bb 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -72,7 +72,7 @@ struct cvm_oct_core_state {
int baseline_cores;
/*
* The number of additional cores that could be processing
- * input packets.
+ * input packtes.
*/
atomic_t available_cores;
cpumask_t cpu_state;
@@ -80,8 +80,6 @@ struct cvm_oct_core_state {
static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;
-static int cvm_irq_cpu;
-
static void cvm_oct_enable_napi(void *_)
{
int cpu = smp_processor_id();
@@ -114,7 +112,11 @@ static void cvm_oct_no_more_work(void)
{
int cpu = smp_processor_id();
- if (cpu == cvm_irq_cpu) {
+ /*
+ * CPU zero is special. It always has the irq enabled when
+ * waiting for incoming packets.
+ */
+ if (cpu == 0) {
enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
return;
}
@@ -133,7 +135,6 @@ static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
{
/* Disable the IRQ and start napi_poll. */
disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
- cvm_irq_cpu = smp_processor_id();
cvm_oct_enable_napi(NULL);
return IRQ_HANDLED;
@@ -513,7 +514,7 @@ void cvm_oct_rx_initialize(void)
if (NULL == dev_for_napi)
panic("No net_devices were allocated.");
- if (max_rx_cpus >= 1 && max_rx_cpus < num_online_cpus())
+ if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus())
atomic_set(&core_state.available_cores, max_rx_cpus);
else
atomic_set(&core_state.available_cores, num_online_cpus());
@@ -525,7 +526,7 @@ void cvm_oct_rx_initialize(void)
cvm_oct_napi_poll, rx_napi_weight);
napi_enable(&cvm_oct_napi[i].napi);
}
- /* Register an IRQ handler to receive POW interrupts */
+ /* Register an IRQ hander for to receive POW interrupts */
i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index 5108bc0..af8d628 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -64,23 +64,31 @@ static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
if (spx_int_reg.s.spf)
pr_err("SPI1: SRX Spi4 interface down\n");
if (spx_int_reg.s.calerr)
- pr_err("SPI1: SRX Spi4 Calendar table parity error\n");
+ pr_err("SPI1: SRX Spi4 Calendar table "
+ "parity error\n");
if (spx_int_reg.s.syncerr)
- pr_err("SPI1: SRX Consecutive Spi4 DIP4 errors have exceeded SPX_ERR_CTL[ERRCNT]\n");
+ pr_err("SPI1: SRX Consecutive Spi4 DIP4 "
+ "errors have exceeded "
+ "SPX_ERR_CTL[ERRCNT]\n");
if (spx_int_reg.s.diperr)
pr_err("SPI1: SRX Spi4 DIP4 error\n");
if (spx_int_reg.s.tpaovr)
- pr_err("SPI1: SRX Selected port has hit TPA overflow\n");
+ pr_err("SPI1: SRX Selected port has hit "
+ "TPA overflow\n");
if (spx_int_reg.s.rsverr)
- pr_err("SPI1: SRX Spi4 reserved control word detected\n");
+ pr_err("SPI1: SRX Spi4 reserved control "
+ "word detected\n");
if (spx_int_reg.s.drwnng)
- pr_err("SPI1: SRX Spi4 receive FIFO drowning/overflow\n");
+ pr_err("SPI1: SRX Spi4 receive FIFO "
+ "drowning/overflow\n");
if (spx_int_reg.s.clserr)
- pr_err("SPI1: SRX Spi4 packet closed on non-16B alignment without EOP\n");
+ pr_err("SPI1: SRX Spi4 packet closed on "
+ "non-16B alignment without EOP\n");
if (spx_int_reg.s.spiovr)
pr_err("SPI1: SRX Spi4 async FIFO overflow\n");
if (spx_int_reg.s.abnorm)
- pr_err("SPI1: SRX Abnormal packet termination (ERR bit)\n");
+ pr_err("SPI1: SRX Abnormal packet "
+ "termination (ERR bit)\n");
if (spx_int_reg.s.prtnxa)
pr_err("SPI1: SRX Port out of range\n");
}
@@ -91,23 +99,31 @@ static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(1));
if (stx_int_reg.s.syncerr)
- pr_err("SPI1: STX Interface encountered a fatal error\n");
+ pr_err("SPI1: STX Interface encountered a "
+ "fatal error\n");
if (stx_int_reg.s.frmerr)
- pr_err("SPI1: STX FRMCNT has exceeded STX_DIP_CNT[MAXFRM]\n");
+ pr_err("SPI1: STX FRMCNT has exceeded "
+ "STX_DIP_CNT[MAXFRM]\n");
if (stx_int_reg.s.unxfrm)
- pr_err("SPI1: STX Unexpected framing sequence\n");
+ pr_err("SPI1: STX Unexpected framing "
+ "sequence\n");
if (stx_int_reg.s.nosync)
- pr_err("SPI1: STX ERRCNT has exceeded STX_DIP_CNT[MAXDIP]\n");
+ pr_err("SPI1: STX ERRCNT has exceeded "
+ "STX_DIP_CNT[MAXDIP]\n");
if (stx_int_reg.s.diperr)
- pr_err("SPI1: STX DIP2 error on the Spi4 Status channel\n");
+ pr_err("SPI1: STX DIP2 error on the Spi4 "
+ "Status channel\n");
if (stx_int_reg.s.datovr)
pr_err("SPI1: STX Spi4 FIFO overflow error\n");
if (stx_int_reg.s.ovrbst)
- pr_err("SPI1: STX Transmit packet burst too big\n");
+ pr_err("SPI1: STX Transmit packet burst "
+ "too big\n");
if (stx_int_reg.s.calpar1)
- pr_err("SPI1: STX Calendar Table Parity Error Bank1\n");
+ pr_err("SPI1: STX Calendar Table Parity "
+ "Error Bank1\n");
if (stx_int_reg.s.calpar0)
- pr_err("SPI1: STX Calendar Table Parity Error Bank0\n");
+ pr_err("SPI1: STX Calendar Table Parity "
+ "Error Bank0\n");
}
cvmx_write_csr(CVMX_SPXX_INT_MSK(1), 0);
@@ -128,23 +144,31 @@ static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
if (spx_int_reg.s.spf)
pr_err("SPI0: SRX Spi4 interface down\n");
if (spx_int_reg.s.calerr)
- pr_err("SPI0: SRX Spi4 Calendar table parity error\n");
+ pr_err("SPI0: SRX Spi4 Calendar table "
+ "parity error\n");
if (spx_int_reg.s.syncerr)
- pr_err("SPI0: SRX Consecutive Spi4 DIP4 errors have exceeded SPX_ERR_CTL[ERRCNT]\n");
+ pr_err("SPI0: SRX Consecutive Spi4 DIP4 "
+ "errors have exceeded "
+ "SPX_ERR_CTL[ERRCNT]\n");
if (spx_int_reg.s.diperr)
pr_err("SPI0: SRX Spi4 DIP4 error\n");
if (spx_int_reg.s.tpaovr)
- pr_err("SPI0: SRX Selected port has hit TPA overflow\n");
+ pr_err("SPI0: SRX Selected port has hit "
+ "TPA overflow\n");
if (spx_int_reg.s.rsverr)
- pr_err("SPI0: SRX Spi4 reserved control word detected\n");
+ pr_err("SPI0: SRX Spi4 reserved control "
+ "word detected\n");
if (spx_int_reg.s.drwnng)
- pr_err("SPI0: SRX Spi4 receive FIFO drowning/overflow\n");
+ pr_err("SPI0: SRX Spi4 receive FIFO "
+ "drowning/overflow\n");
if (spx_int_reg.s.clserr)
- pr_err("SPI0: SRX Spi4 packet closed on non-16B alignment without EOP\n");
+ pr_err("SPI0: SRX Spi4 packet closed on "
+ "non-16B alignment without EOP\n");
if (spx_int_reg.s.spiovr)
pr_err("SPI0: SRX Spi4 async FIFO overflow\n");
if (spx_int_reg.s.abnorm)
- pr_err("SPI0: SRX Abnormal packet termination (ERR bit)\n");
+ pr_err("SPI0: SRX Abnormal packet "
+ "termination (ERR bit)\n");
if (spx_int_reg.s.prtnxa)
pr_err("SPI0: SRX Port out of range\n");
}
@@ -155,23 +179,31 @@ static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(0));
if (stx_int_reg.s.syncerr)
- pr_err("SPI0: STX Interface encountered a fatal error\n");
+ pr_err("SPI0: STX Interface encountered a "
+ "fatal error\n");
if (stx_int_reg.s.frmerr)
- pr_err("SPI0: STX FRMCNT has exceeded STX_DIP_CNT[MAXFRM]\n");
+ pr_err("SPI0: STX FRMCNT has exceeded "
+ "STX_DIP_CNT[MAXFRM]\n");
if (stx_int_reg.s.unxfrm)
- pr_err("SPI0: STX Unexpected framing sequence\n");
+ pr_err("SPI0: STX Unexpected framing "
+ "sequence\n");
if (stx_int_reg.s.nosync)
- pr_err("SPI0: STX ERRCNT has exceeded STX_DIP_CNT[MAXDIP]\n");
+ pr_err("SPI0: STX ERRCNT has exceeded "
+ "STX_DIP_CNT[MAXDIP]\n");
if (stx_int_reg.s.diperr)
- pr_err("SPI0: STX DIP2 error on the Spi4 Status channel\n");
+ pr_err("SPI0: STX DIP2 error on the Spi4 "
+ "Status channel\n");
if (stx_int_reg.s.datovr)
pr_err("SPI0: STX Spi4 FIFO overflow error\n");
if (stx_int_reg.s.ovrbst)
- pr_err("SPI0: STX Transmit packet burst too big\n");
+ pr_err("SPI0: STX Transmit packet burst "
+ "too big\n");
if (stx_int_reg.s.calpar1)
- pr_err("SPI0: STX Calendar Table Parity Error Bank1\n");
+ pr_err("SPI0: STX Calendar Table Parity "
+ "Error Bank1\n");
if (stx_int_reg.s.calpar0)
- pr_err("SPI0: STX Calendar Table Parity Error Bank0\n");
+ pr_err("SPI0: STX Calendar Table Parity "
+ "Error Bank0\n");
}
cvmx_write_csr(CVMX_SPXX_INT_MSK(0), 0);
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 9b4d0b5..5631dd9 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -78,12 +78,10 @@ static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
{
int32_t undo;
- undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
- MAX_SKB_TO_FREE;
+ undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
if (undo > 0)
cvmx_fau_atomic_add32(fau, -undo);
- skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE :
- -skb_to_free;
+ skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -skb_to_free;
return skb_to_free;
}
@@ -110,10 +108,8 @@ void cvm_oct_free_tx_skbs(struct net_device *dev)
for (qos = 0; qos < queues_per_port; qos++) {
if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
continue;
- skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4,
- MAX_SKB_TO_FREE);
- skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
- priv->fau+qos*4);
+ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4, MAX_SKB_TO_FREE);
+ skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
total_freed += skb_to_free;
@@ -121,14 +117,12 @@ void cvm_oct_free_tx_skbs(struct net_device *dev)
struct sk_buff *to_free_list = NULL;
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
while (skb_to_free > 0) {
- struct sk_buff *t;
- t = __skb_dequeue(&priv->tx_free_list[qos]);
+ struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
t->next = to_free_list;
to_free_list = t;
skb_to_free--;
}
- spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
- flags);
+ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
/* Do the actual freeing outside of the lock. */
while (to_free_list) {
struct sk_buff *t = to_free_list;
@@ -217,23 +211,15 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(__skb_linearize(skb))) {
queue_type = QUEUE_DROP;
if (USE_ASYNC_IOBDMA) {
- /*
- * Get the number of skbuffs in use
- * by the hardware
- */
+ /* Get the number of skbuffs in use by the hardware */
CVMX_SYNCIOBDMA;
- skb_to_free =
- cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
} else {
- /*
- * Get the number of skbuffs in use
- * by the hardware
- */
- skb_to_free = cvmx_fau_fetch_and_add32(
- priv->fau + qos * 4, MAX_SKB_TO_FREE);
+ /* Get the number of skbuffs in use by the hardware */
+ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
+ MAX_SKB_TO_FREE);
}
- skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
- priv->fau + qos * 4);
+ skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4);
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
goto skip_xmit;
}
@@ -290,9 +276,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
- hw_buffer.s.addr = XKPHYS_TO_PHYS(
- (u64)(page_address(fs->page.p) +
- fs->page_offset));
+ hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) + fs->page_offset));
hw_buffer.s.size = fs->size;
CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
}
@@ -374,9 +358,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
*/
pko_command.s.dontfree = 0;
- hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
- ((unsigned long)fpa_head >> 7);
-
+ hw_buffer.s.back = ((unsigned long)skb->data >> 7) - ((unsigned long)fpa_head >> 7);
*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
/*
@@ -440,22 +422,17 @@ dont_put_skbuff_in_hw:
queue_type = QUEUE_HW;
}
if (USE_ASYNC_IOBDMA)
- cvmx_fau_async_fetch_and_add32(
- CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
+ cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
/* Drop this packet if we have too many already queued to the HW */
- if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
- MAX_OUT_QUEUE_DEPTH)) {
-
+ if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
if (dev->tx_queue_len != 0) {
/* Drop the lock when notifying the core. */
- spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
- flags);
+ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
netif_stop_queue(dev);
- spin_lock_irqsave(&priv->tx_free_list[qos].lock,
- flags);
+ spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
} else {
/* If not using normal queueing. */
queue_type = QUEUE_DROP;
@@ -471,8 +448,7 @@ dont_put_skbuff_in_hw:
priv->queue + qos,
pko_command, hw_buffer,
CVMX_PKO_LOCK_NONE))) {
- printk_ratelimited("%s: Failed to send the packet\n",
- dev->name);
+ printk_ratelimited("%s: Failed to send the packet\n", dev->name);
queue_type = QUEUE_DROP;
}
skip_xmit:
@@ -517,8 +493,7 @@ skip_xmit:
cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
} else {
- total_to_clean = cvmx_fau_fetch_and_add32(
- FAU_TOTAL_TX_TO_CLEAN, 1);
+ total_to_clean = cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
}
if (total_to_clean & 0x3ff) {
@@ -552,8 +527,8 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
/* Get a work queue entry */
cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
if (unlikely(work == NULL)) {
- printk_ratelimited("%s: Failed to allocate a work queue entry\n",
- dev->name);
+ printk_ratelimited("%s: Failed to allocate a work "
+ "queue entry\n", dev->name);
priv->stats.tx_dropped++;
dev_kfree_skb(skb);
return 0;
@@ -734,7 +709,7 @@ void cvm_oct_tx_initialize(void)
/* Disable the interrupt. */
cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
- /* Register an IRQ handler to receive CIU_TIMX(1) interrupts */
+ /* Register an IRQ hander for to receive CIU_TIMX(1) interrupts */
i = request_irq(OCTEON_IRQ_TIMER1,
cvm_oct_tx_cleanup_watchdog, 0,
"Ethernet", cvm_oct_device);
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index bd6ca71..c3a90e7 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -163,13 +163,11 @@ static void cvm_oct_periodic_worker(struct work_struct *work)
if (priv->poll)
priv->poll(cvm_oct_device[priv->port]);
- cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
- cvm_oct_device[priv->port]);
+ cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(cvm_oct_device[priv->port]);
if (!atomic_read(&cvm_oct_poll_queue_stopping))
- queue_delayed_work(cvm_oct_poll_queue,
- &priv->port_periodic_work, HZ);
-}
+ queue_delayed_work(cvm_oct_poll_queue, &priv->port_periodic_work, HZ);
+ }
static void cvm_oct_configure_common_hw(void)
{
@@ -455,7 +453,7 @@ int cvm_oct_common_init(struct net_device *dev)
if (priv->of_node)
mac = of_get_mac_address(priv->of_node);
- if (mac)
+ if (mac && is_valid_ether_addr(mac))
memcpy(dev->dev_addr, mac, ETH_ALEN);
else
eth_hw_addr_random(dev);
@@ -586,8 +584,8 @@ static const struct net_device_ops cvm_oct_pow_netdev_ops = {
extern void octeon_mdiobus_force_mod_depencency(void);
-static struct device_node *cvm_oct_of_get_child(
- const struct device_node *parent, int reg_val)
+static struct device_node *cvm_oct_of_get_child(const struct device_node *parent,
+ int reg_val)
{
struct device_node *node = NULL;
int size;
@@ -605,7 +603,7 @@ static struct device_node *cvm_oct_of_get_child(
}
static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
- int interface, int port)
+ int interface, int port)
{
struct device_node *ni, *np;
@@ -715,8 +713,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
int port;
int port_index;
- for (port_index = 0,
- port = cvmx_helper_get_ipd_port(interface, 0);
+ for (port_index = 0, port = cvmx_helper_get_ipd_port(interface, 0);
port < cvmx_helper_get_ipd_port(interface, num_ports);
port_index++, port++) {
struct octeon_ethernet *priv;
@@ -729,8 +726,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
/* Initialize the device private structure. */
priv = netdev_priv(dev);
- priv->of_node = cvm_oct_node_for_port(pip, interface,
- port_index);
+ priv->of_node = cvm_oct_node_for_port(pip, interface, port_index);
INIT_DELAYED_WORK(&priv->port_periodic_work,
cvm_oct_periodic_worker);
@@ -797,7 +793,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
cvmx_pko_get_num_queues(priv->port) *
sizeof(uint32_t);
queue_delayed_work(cvm_oct_poll_queue,
- &priv->port_periodic_work, HZ);
+ &priv->port_periodic_work, HZ);
}
}
}