From 15877e9c8a12ced38ac31d8bf4f93f3634fbea3f Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 8 Jan 2012 12:04:05 +0000 Subject: NET: sa11x0-ir: fix documentation bug Spell the module parameter correctly in comments. Signed-off-by: Russell King diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index da27050..751f2a9 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -15,7 +15,7 @@ * This driver takes one kernel command line parameter, sa1100ir=, with * the following options: * max_rate:baudrate - set the maximum baud rate - * power_leve:level - set the transmitter power level + * power_level:level - set the transmitter power level * tx_lpm:0|1 - set transmit low power mode */ #include -- cgit v0.10.2 From 22f0bf96de1506081a8b18ad3e0d04d5add70a4a Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 8 Jan 2012 13:55:23 +0000 Subject: NET: sa11x0-ir: handle DMA mapping errors properly Handle DMA mapping errors in the rx skb allocation and tx paths. Signed-off-by: Russell King diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 751f2a9..84fecce 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -82,7 +82,6 @@ static int sa1100_irda_rx_alloc(struct sa1100_irda *si) return 0; si->rxskb = alloc_skb(HPSIR_MAX_RXLEN + 1, GFP_ATOMIC); - if (!si->rxskb) { printk(KERN_ERR "sa1100_ir: out of memory for RX SKB\n"); return -ENOMEM; @@ -97,6 +96,11 @@ static int sa1100_irda_rx_alloc(struct sa1100_irda *si) si->rxbuf_dma = dma_map_single(si->dev, si->rxskb->data, HPSIR_MAX_RXLEN, DMA_FROM_DEVICE); + if (dma_mapping_error(si->dev, si->rxbuf_dma)) { + dev_kfree_skb_any(si->rxskb); + return -ENOMEM; + } + return 0; } @@ -518,7 +522,8 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev netif_rx(skb); } else { /* - * Remap the buffer. + * Remap the buffer - it was previously mapped, and we + * hope that this succeeds. */ si->rxbuf_dma = dma_map_single(si->dev, si->rxskb->data, HPSIR_MAX_RXLEN, @@ -701,6 +706,13 @@ static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) si->txskb = skb; si->txbuf_dma = dma_map_single(si->dev, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(si->dev, si->txbuf_dma)) { + si->txskb = NULL; + netif_wake_queue(dev); + dev->stats.tx_dropped++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } sa1100_start_dma(si->txdma, si->txbuf_dma, skb->len); -- cgit v0.10.2 From d32386086b4a250bd71125f8d760cfffada0e422 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 8 Jan 2012 12:07:24 +0000 Subject: NET: sa11x0-ir: set netdev's parent struct device Add the missing SET_NETDEV_DEV() call to set the parent device correctly for this network interface. Signed-off-by: Russell King diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 84fecce..149a3b4 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -919,6 +919,8 @@ static int sa1100_irda_probe(struct platform_device *pdev) if (!dev) goto err_mem_4; + SET_NETDEV_DEV(dev, &pdev->dev); + si = netdev_priv(dev); si->dev = &pdev->dev; si->pdata = pdev->dev.platform_data; -- cgit v0.10.2 From e556fdbde38f68d87f689473b112cc65ddacd6a4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 8 Jan 2012 12:02:17 +0000 Subject: NET: sa11x0-ir: obtain interrupt number from platform resources Convert the sa11x0-ir driver to obtain its interrupt number from the platform device resources, rather than via the asm/irq.h include. 
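For reference, a board or SoC support file registering this device would then describe the interrupt as a platform resource, roughly as below (an illustrative sketch only; the resource table, device name and .id value are assumptions, not copied from any existing board file):

	/* Illustrative only: names and values assumed, not from a real board file */
	static struct resource sa11x0ir_resources[] = {
		{
			.start	= IRQ_Ser2ICP,	/* on-chip Ser2 ICP interrupt */
			.end	= IRQ_Ser2ICP,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device sa11x0ir_device = {
		.name		= "sa11x0-ir",
		.id		= -1,
		.num_resources	= ARRAY_SIZE(sa11x0ir_resources),
		.resource	= sa11x0ir_resources,
	};

The driver then retrieves the number with platform_get_irq(pdev, 0) and fails the probe if no IRQ resource is present, rather than hard-coding IRQ_Ser2ICP.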
Signed-off-by: Russell King diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 149a3b4..2f5bf0b 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -35,7 +35,6 @@ #include #include -#include #include #include #include @@ -900,11 +899,15 @@ static int sa1100_irda_probe(struct platform_device *pdev) struct net_device *dev; struct sa1100_irda *si; unsigned int baudrate_mask; - int err; + int err, irq; if (!pdev->dev.platform_data) return -EINVAL; + irq = platform_get_irq(pdev, 0); + if (irq <= 0) + return irq < 0 ? irq : -ENXIO; + err = request_mem_region(__PREG(Ser2UTCR0), 0x24, "IrDA") ? 0 : -EBUSY; if (err) goto err_mem_1; @@ -936,7 +939,7 @@ static int sa1100_irda_probe(struct platform_device *pdev) goto err_mem_5; dev->netdev_ops = &sa1100_irda_netdev_ops; - dev->irq = IRQ_Ser2ICP; + dev->irq = irq; irda_init_max_qos_capabilies(&si->qos); -- cgit v0.10.2 From 885767ca4ce0800c5d02eb66cc10a0494b7bf312 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 8 Jan 2012 12:53:22 +0000 Subject: NET: sa11x0-ir: containerize DMA data Both the transmit and receive DMA store identical data: the skb, dma address, and the dma registers. Move this data into its own data structure. The following replacements were used: rxskb -> dma_rx.skb rxbuf_dma -> dma_rx.dma rxdma -> dma_rx.regs txskb -> dma_tx.skb txbuf_dma -> dma_tx.dma txdma -> dma_tx.regs Signed-off-by: Russell King diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 2f5bf0b..adb7fea 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -43,6 +43,12 @@ static int power_level = 3; static int tx_lpm; static int max_rate = 4000000; +struct sa1100_buf { + struct sk_buff *skb; + dma_addr_t dma; + dma_regs_t *regs; +}; + struct sa1100_irda { unsigned char hscr0; unsigned char utcr4; @@ -52,12 +58,8 @@ struct sa1100_irda { int speed; int newspeed; - struct sk_buff *txskb; - struct sk_buff *rxskb; - dma_addr_t txbuf_dma; - dma_addr_t rxbuf_dma; - dma_regs_t *txdma; - dma_regs_t *rxdma; + struct sa1100_buf dma_rx; + struct sa1100_buf dma_tx; struct device *dev; struct irda_platform_data *pdata; @@ -77,11 +79,11 @@ struct sa1100_irda { */ static int sa1100_irda_rx_alloc(struct sa1100_irda *si) { - if (si->rxskb) + if (si->dma_rx.skb) return 0; - si->rxskb = alloc_skb(HPSIR_MAX_RXLEN + 1, GFP_ATOMIC); - if (!si->rxskb) { + si->dma_rx.skb = alloc_skb(HPSIR_MAX_RXLEN + 1, GFP_ATOMIC); + if (!si->dma_rx.skb) { printk(KERN_ERR "sa1100_ir: out of memory for RX SKB\n"); return -ENOMEM; } @@ -90,13 +92,13 @@ static int sa1100_irda_rx_alloc(struct sa1100_irda *si) * Align any IP headers that may be contained * within the frame. */ - skb_reserve(si->rxskb, 1); + skb_reserve(si->dma_rx.skb, 1); - si->rxbuf_dma = dma_map_single(si->dev, si->rxskb->data, + si->dma_rx.dma = dma_map_single(si->dev, si->dma_rx.skb->data, HPSIR_MAX_RXLEN, DMA_FROM_DEVICE); - if (dma_mapping_error(si->dev, si->rxbuf_dma)) { - dev_kfree_skb_any(si->rxskb); + if (dma_mapping_error(si->dev, si->dma_rx.dma)) { + dev_kfree_skb_any(si->dma_rx.skb); return -ENOMEM; } @@ -109,7 +111,7 @@ static int sa1100_irda_rx_alloc(struct sa1100_irda *si) */ static void sa1100_irda_rx_dma_start(struct sa1100_irda *si) { - if (!si->rxskb) { + if (!si->dma_rx.skb) { printk(KERN_ERR "sa1100_ir: rx buffer went missing\n"); return; } @@ -122,8 +124,8 @@ static void sa1100_irda_rx_dma_start(struct sa1100_irda *si) /* * Enable the DMA, receiver and receive interrupt. 
*/ - sa1100_clear_dma(si->rxdma); - sa1100_start_dma(si->rxdma, si->rxbuf_dma, HPSIR_MAX_RXLEN); + sa1100_clear_dma(si->dma_rx.regs); + sa1100_start_dma(si->dma_rx.regs, si->dma_rx.dma, HPSIR_MAX_RXLEN); Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_RXE; } @@ -144,7 +146,7 @@ static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed) * Stop the receive DMA. */ if (IS_FIR(si)) - sa1100_stop_dma(si->rxdma); + sa1100_stop_dma(si->dma_rx.regs); local_irq_save(flags); @@ -280,8 +282,8 @@ static void sa1100_irda_shutdown(struct sa1100_irda *si) /* * Stop all DMA activity. */ - sa1100_stop_dma(si->rxdma); - sa1100_stop_dma(si->txdma); + sa1100_stop_dma(si->dma_rx.regs); + sa1100_stop_dma(si->dma_tx.regs); /* Disable the port. */ Ser2UTCR3 = 0; @@ -460,7 +462,7 @@ static void sa1100_irda_hpsir_irq(struct net_device *dev) static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev) { - struct sk_buff *skb = si->rxskb; + struct sk_buff *skb = si->dma_rx.skb; dma_addr_t dma_addr; unsigned int len, stat, data; @@ -472,11 +474,11 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev /* * Get the current data position. */ - dma_addr = sa1100_get_dma_pos(si->rxdma); - len = dma_addr - si->rxbuf_dma; + dma_addr = sa1100_get_dma_pos(si->dma_rx.regs); + len = dma_addr - si->dma_rx.dma; if (len > HPSIR_MAX_RXLEN) len = HPSIR_MAX_RXLEN; - dma_unmap_single(si->dev, si->rxbuf_dma, len, DMA_FROM_DEVICE); + dma_unmap_single(si->dev, si->dma_rx.dma, len, DMA_FROM_DEVICE); do { /* @@ -504,7 +506,7 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev } while (Ser2HSSR0 & HSSR0_EIF); if (stat & HSSR1_EOF) { - si->rxskb = NULL; + si->dma_rx.skb = NULL; skb_put(skb, len); skb->dev = dev; @@ -524,7 +526,7 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev * Remap the buffer - it was previously mapped, and we * hope that this succeeds. */ - si->rxbuf_dma = dma_map_single(si->dev, si->rxskb->data, + si->dma_rx.dma = dma_map_single(si->dev, si->dma_rx.skb->data, HPSIR_MAX_RXLEN, DMA_FROM_DEVICE); } @@ -543,7 +545,7 @@ static void sa1100_irda_fir_irq(struct net_device *dev) /* * Stop RX DMA */ - sa1100_stop_dma(si->rxdma); + sa1100_stop_dma(si->dma_rx.regs); /* * Framing error - we throw away the packet completely. @@ -600,9 +602,9 @@ static void sa1100_irda_txdma_irq(void *id) { struct net_device *dev = id; struct sa1100_irda *si = netdev_priv(dev); - struct sk_buff *skb = si->txskb; + struct sk_buff *skb = si->dma_tx.skb; - si->txskb = NULL; + si->dma_tx.skb = NULL; /* * Wait for the transmission to complete. Unfortunately, @@ -620,7 +622,7 @@ static void sa1100_irda_txdma_irq(void *id) /* * Do we need to change speed? Note that we're lazy - * here - we don't free the old rxskb. We don't need + * here - we don't free the old dma_rx.skb. We don't need * to allocate a buffer either. */ if (si->newspeed) { @@ -638,7 +640,7 @@ static void sa1100_irda_txdma_irq(void *id) * Account and free the packet. */ if (skb) { - dma_unmap_single(si->dev, si->txbuf_dma, skb->len, DMA_TO_DEVICE); + dma_unmap_single(si->dev, si->dma_tx.dma, skb->len, DMA_TO_DEVICE); dev->stats.tx_packets ++; dev->stats.tx_bytes += skb->len; dev_kfree_skb_irq(skb); @@ -698,22 +700,22 @@ static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) /* * We must not be transmitting... 
*/ - BUG_ON(si->txskb); + BUG_ON(si->dma_tx.skb); netif_stop_queue(dev); - si->txskb = skb; - si->txbuf_dma = dma_map_single(si->dev, skb->data, + si->dma_tx.skb = skb; + si->dma_tx.dma = dma_map_single(si->dev, skb->data, skb->len, DMA_TO_DEVICE); - if (dma_mapping_error(si->dev, si->txbuf_dma)) { - si->txskb = NULL; + if (dma_mapping_error(si->dev, si->dma_tx.dma)) { + si->dma_tx.skb = NULL; netif_wake_queue(dev); dev->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } - sa1100_start_dma(si->txdma, si->txbuf_dma, skb->len); + sa1100_start_dma(si->dma_tx.regs, si->dma_tx.dma, skb->len); /* * If we have a mean turn-around time, impose the specified @@ -785,12 +787,12 @@ static int sa1100_irda_start(struct net_device *dev) goto err_irq; err = sa1100_request_dma(DMA_Ser2HSSPRd, "IrDA receive", - NULL, NULL, &si->rxdma); + NULL, NULL, &si->dma_rx.regs); if (err) goto err_rx_dma; err = sa1100_request_dma(DMA_Ser2HSSPWr, "IrDA transmit", - sa1100_irda_txdma_irq, dev, &si->txdma); + sa1100_irda_txdma_irq, dev, &si->dma_tx.regs); if (err) goto err_tx_dma; @@ -827,9 +829,9 @@ err_irlap: si->open = 0; sa1100_irda_shutdown(si); err_startup: - sa1100_free_dma(si->txdma); + sa1100_free_dma(si->dma_tx.regs); err_tx_dma: - sa1100_free_dma(si->rxdma); + sa1100_free_dma(si->dma_rx.regs); err_rx_dma: free_irq(dev->irq, dev); err_irq: @@ -847,11 +849,11 @@ static int sa1100_irda_stop(struct net_device *dev) * If we have been doing DMA receive, make sure we * tidy that up cleanly. */ - if (si->rxskb) { - dma_unmap_single(si->dev, si->rxbuf_dma, HPSIR_MAX_RXLEN, + if (si->dma_rx.skb) { + dma_unmap_single(si->dev, si->dma_rx.dma, HPSIR_MAX_RXLEN, DMA_FROM_DEVICE); - dev_kfree_skb(si->rxskb); - si->rxskb = NULL; + dev_kfree_skb(si->dma_rx.skb); + si->dma_rx.skb = NULL; } /* Stop IrLAP */ @@ -866,8 +868,8 @@ static int sa1100_irda_stop(struct net_device *dev) /* * Free resources */ - sa1100_free_dma(si->txdma); - sa1100_free_dma(si->rxdma); + sa1100_free_dma(si->dma_tx.regs); + sa1100_free_dma(si->dma_rx.regs); free_irq(dev->irq, dev); sa1100_set_power(si, 0); -- cgit v0.10.2 From ba84525bd9015e7dd20f7c97a2a96b0a238b0223 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 8 Jan 2012 15:38:15 +0000 Subject: NET: sa11x0-ir: fix leak of tx skb Ensure that we unmap and free a pending transmit skb when the interface is stopped. We rearrange the code a little bit to give all places a similar layout when freeing the skb in both the completion and interface stop paths - this gives some consistency to the code. Signed-off-by: Russell King diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index adb7fea..9dc5648 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -602,9 +602,7 @@ static void sa1100_irda_txdma_irq(void *id) { struct net_device *dev = id; struct sa1100_irda *si = netdev_priv(dev); - struct sk_buff *skb = si->dma_tx.skb; - - si->dma_tx.skb = NULL; + struct sk_buff *skb; /* * Wait for the transmission to complete. Unfortunately, @@ -636,14 +634,15 @@ static void sa1100_irda_txdma_irq(void *id) */ sa1100_irda_rx_dma_start(si); - /* - * Account and free the packet. - */ + /* Account and free the packet. 
*/ + skb = si->dma_tx.skb; if (skb) { - dma_unmap_single(si->dev, si->dma_tx.dma, skb->len, DMA_TO_DEVICE); + dma_unmap_single(si->dev, si->dma_tx.dma, skb->len, + DMA_TO_DEVICE); dev->stats.tx_packets ++; dev->stats.tx_bytes += skb->len; dev_kfree_skb_irq(skb); + si->dma_tx.skb = NULL; } /* @@ -841,21 +840,31 @@ err_irq: static int sa1100_irda_stop(struct net_device *dev) { struct sa1100_irda *si = netdev_priv(dev); + struct sk_buff *skb; disable_irq(dev->irq); sa1100_irda_shutdown(si); /* - * If we have been doing DMA receive, make sure we + * If we have been doing any DMA activity, make sure we * tidy that up cleanly. */ - if (si->dma_rx.skb) { + skb = si->dma_rx.skb; + if (skb) { dma_unmap_single(si->dev, si->dma_rx.dma, HPSIR_MAX_RXLEN, DMA_FROM_DEVICE); - dev_kfree_skb(si->dma_rx.skb); + dev_kfree_skb(skb); si->dma_rx.skb = NULL; } + skb = si->dma_tx.skb; + if (skb) { + dma_unmap_single(si->dev, si->dma_tx.dma, skb->len, + DMA_TO_DEVICE); + dev_kfree_skb(skb); + si->dma_tx.skb = NULL; + } + /* Stop IrLAP */ if (si->irlap) { irlap_close(si->irlap); -- cgit v0.10.2 From cbe1d24fb70751ef14801338aa945e807ba63a90 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 8 Jan 2012 16:40:07 +0000 Subject: NET: sa11x0-ir: move sa1100_irda_{startup,shutdown,suspend,resume} Places these functions in better locations in the file, near where they are used. This saves some tiresome paging up/down. Signed-off-by: Russell King diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 9dc5648..32ac4a4 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -230,137 +230,6 @@ sa1100_set_power(struct sa1100_irda *si, unsigned int state) return ret; } -static int sa1100_irda_startup(struct sa1100_irda *si) -{ - int ret; - - /* - * Ensure that the ports for this device are setup correctly. - */ - if (si->pdata->startup) { - ret = si->pdata->startup(si->dev); - if (ret) - return ret; - } - - /* - * Configure PPC for IRDA - we want to drive TXD2 low. - * We also want to drive this pin low during sleep. - */ - PPSR &= ~PPC_TXD2; - PSDR &= ~PPC_TXD2; - PPDR |= PPC_TXD2; - - /* - * Enable HP-SIR modulation, and ensure that the port is disabled. - */ - Ser2UTCR3 = 0; - Ser2HSCR0 = HSCR0_UART; - Ser2UTCR4 = si->utcr4; - Ser2UTCR0 = UTCR0_8BitData; - Ser2HSCR2 = HSCR2_TrDataH | HSCR2_RcDataL; - - /* - * Clear status register - */ - Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; - - ret = sa1100_irda_set_speed(si, si->speed = 9600); - if (ret) { - Ser2UTCR3 = 0; - Ser2HSCR0 = 0; - - if (si->pdata->shutdown) - si->pdata->shutdown(si->dev); - } - - return ret; -} - -static void sa1100_irda_shutdown(struct sa1100_irda *si) -{ - /* - * Stop all DMA activity. - */ - sa1100_stop_dma(si->dma_rx.regs); - sa1100_stop_dma(si->dma_tx.regs); - - /* Disable the port. */ - Ser2UTCR3 = 0; - Ser2HSCR0 = 0; - - if (si->pdata->shutdown) - si->pdata->shutdown(si->dev); -} - -#ifdef CONFIG_PM -/* - * Suspend the IrDA interface. - */ -static int sa1100_irda_suspend(struct platform_device *pdev, pm_message_t state) -{ - struct net_device *dev = platform_get_drvdata(pdev); - struct sa1100_irda *si; - - if (!dev) - return 0; - - si = netdev_priv(dev); - if (si->open) { - /* - * Stop the transmit queue - */ - netif_device_detach(dev); - disable_irq(dev->irq); - sa1100_irda_shutdown(si); - __sa1100_irda_set_power(si, 0); - } - - return 0; -} - -/* - * Resume the IrDA interface. 
- */ -static int sa1100_irda_resume(struct platform_device *pdev) -{ - struct net_device *dev = platform_get_drvdata(pdev); - struct sa1100_irda *si; - - if (!dev) - return 0; - - si = netdev_priv(dev); - if (si->open) { - /* - * If we missed a speed change, initialise at the new speed - * directly. It is debatable whether this is actually - * required, but in the interests of continuing from where - * we left off it is desirable. The converse argument is - * that we should re-negotiate at 9600 baud again. - */ - if (si->newspeed) { - si->speed = si->newspeed; - si->newspeed = 0; - } - - sa1100_irda_startup(si); - __sa1100_irda_set_power(si, si->power); - enable_irq(dev->irq); - - /* - * This automatically wakes up the queue - */ - netif_device_attach(dev); - } - - return 0; -} -#else -#define sa1100_irda_suspend NULL -#define sa1100_irda_resume NULL -#endif - /* * HP-SIR format interrupt service routines. */ @@ -774,6 +643,69 @@ sa1100_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd) return ret; } +static int sa1100_irda_startup(struct sa1100_irda *si) +{ + int ret; + + /* + * Ensure that the ports for this device are setup correctly. + */ + if (si->pdata->startup) { + ret = si->pdata->startup(si->dev); + if (ret) + return ret; + } + + /* + * Configure PPC for IRDA - we want to drive TXD2 low. + * We also want to drive this pin low during sleep. + */ + PPSR &= ~PPC_TXD2; + PSDR &= ~PPC_TXD2; + PPDR |= PPC_TXD2; + + /* + * Enable HP-SIR modulation, and ensure that the port is disabled. + */ + Ser2UTCR3 = 0; + Ser2HSCR0 = HSCR0_UART; + Ser2UTCR4 = si->utcr4; + Ser2UTCR0 = UTCR0_8BitData; + Ser2HSCR2 = HSCR2_TrDataH | HSCR2_RcDataL; + + /* + * Clear status register + */ + Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; + + ret = sa1100_irda_set_speed(si, si->speed = 9600); + if (ret) { + Ser2UTCR3 = 0; + Ser2HSCR0 = 0; + + if (si->pdata->shutdown) + si->pdata->shutdown(si->dev); + } + + return ret; +} + +static void sa1100_irda_shutdown(struct sa1100_irda *si) +{ + /* + * Stop all DMA activity. + */ + sa1100_stop_dma(si->dma_rx.regs); + sa1100_stop_dma(si->dma_tx.regs); + + /* Disable the port. */ + Ser2UTCR3 = 0; + Ser2HSCR0 = 0; + + if (si->pdata->shutdown) + si->pdata->shutdown(si->dev); +} + static int sa1100_irda_start(struct net_device *dev) { struct sa1100_irda *si = netdev_priv(dev); @@ -1024,6 +956,74 @@ static int sa1100_irda_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM +/* + * Suspend the IrDA interface. + */ +static int sa1100_irda_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct sa1100_irda *si; + + if (!dev) + return 0; + + si = netdev_priv(dev); + if (si->open) { + /* + * Stop the transmit queue + */ + netif_device_detach(dev); + disable_irq(dev->irq); + sa1100_irda_shutdown(si); + __sa1100_irda_set_power(si, 0); + } + + return 0; +} + +/* + * Resume the IrDA interface. + */ +static int sa1100_irda_resume(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct sa1100_irda *si; + + if (!dev) + return 0; + + si = netdev_priv(dev); + if (si->open) { + /* + * If we missed a speed change, initialise at the new speed + * directly. It is debatable whether this is actually + * required, but in the interests of continuing from where + * we left off it is desirable. The converse argument is + * that we should re-negotiate at 9600 baud again. 
+ */ + if (si->newspeed) { + si->speed = si->newspeed; + si->newspeed = 0; + } + + sa1100_irda_startup(si); + __sa1100_irda_set_power(si, si->power); + enable_irq(dev->irq); + + /* + * This automatically wakes up the queue + */ + netif_device_attach(dev); + } + + return 0; +} +#else +#define sa1100_irda_suspend NULL +#define sa1100_irda_resume NULL +#endif + static struct platform_driver sa1100ir_driver = { .probe = sa1100_irda_probe, .remove = sa1100_irda_remove, -- cgit v0.10.2 From 0e888ee31566c3f5071474ddd68457a7ad2ae5ac Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 8 Jan 2012 16:30:44 +0000 Subject: NET: sa11x0-ir: factor out speed checks Whenever we complete a transmit, we always check for a speed change. This check was open coded in several places. Provide a helper function to do this instead. Signed-off-by: Russell King diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 32ac4a4..bfae34f 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -70,6 +70,8 @@ struct sa1100_irda { iobuff_t rx_buff; }; +static int sa1100_irda_set_speed(struct sa1100_irda *, int); + #define IS_FIR(si) ((si)->speed >= 4000000) #define HPSIR_MAX_RXLEN 2047 @@ -129,6 +131,14 @@ static void sa1100_irda_rx_dma_start(struct sa1100_irda *si) Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_RXE; } +static void sa1100_irda_check_speed(struct sa1100_irda *si) +{ + if (si->newspeed) { + sa1100_irda_set_speed(si, si->newspeed); + si->newspeed = 0; + } +} + /* * Set the IrDA communications speed. */ @@ -318,10 +328,7 @@ static void sa1100_irda_hpsir_irq(struct net_device *dev) Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE; - if (si->newspeed) { - sa1100_irda_set_speed(si, si->newspeed); - si->newspeed = 0; - } + sa1100_irda_check_speed(si); /* I'm hungry! */ netif_wake_queue(dev); @@ -492,10 +499,7 @@ static void sa1100_irda_txdma_irq(void *id) * here - we don't free the old dma_rx.skb. We don't need * to allocate a buffer either. */ - if (si->newspeed) { - sa1100_irda_set_speed(si, si->newspeed); - si->newspeed = 0; - } + sa1100_irda_check_speed(si); /* * Start reception. This disables the transmitter for @@ -538,10 +542,7 @@ static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) * If this is an empty frame, we can bypass a lot. */ if (skb->len == 0) { - if (si->newspeed) { - si->newspeed = 0; - sa1100_irda_set_speed(si, speed); - } + sa1100_irda_check_speed(si); dev_kfree_skb(skb); return NETDEV_TX_OK; } -- cgit v0.10.2 From 3d26db137ac3169623a132ea310d26af6a48bf88 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 8 Jan 2012 16:16:39 +0000 Subject: NET: sa11x0-ir: split SIR and FIR tx functions Split the SIR and FIR transmit functions, as they behave differently. Signed-off-by: Russell King diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index bfae34f..8b8cf415 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -68,6 +68,8 @@ struct sa1100_irda { iobuff_t tx_buff; iobuff_t rx_buff; + + int (*tx_start)(struct sk_buff *, struct net_device *, struct sa1100_irda *); }; static int sa1100_irda_set_speed(struct sa1100_irda *, int); @@ -140,6 +142,63 @@ static void sa1100_irda_check_speed(struct sa1100_irda *si) } /* + * HP-SIR format support. 
+ */ +static int sa1100_irda_sir_tx_start(struct sk_buff *skb, struct net_device *dev, + struct sa1100_irda *si) +{ + si->tx_buff.data = si->tx_buff.head; + si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, + si->tx_buff.truesize); + + /* + * Set the transmit interrupt enable. This will fire off an + * interrupt immediately. Note that we disable the receiver + * so we won't get spurious characters received. + */ + Ser2UTCR3 = UTCR3_TIE | UTCR3_TXE; + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +/* + * FIR format support. + */ +static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev, + struct sa1100_irda *si) +{ + int mtt = irda_get_mtt(skb); + + si->dma_tx.skb = skb; + si->dma_tx.dma = dma_map_single(si->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(si->dev, si->dma_tx.dma)) { + si->dma_tx.skb = NULL; + netif_wake_queue(dev); + dev->stats.tx_dropped++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + sa1100_start_dma(si->dma_tx.regs, si->dma_tx.dma, skb->len); + + /* + * If we have a mean turn-around time, impose the specified + * specified delay. We could shorten this by timing from + * the point we received the packet. + */ + if (mtt) + udelay(mtt); + + Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE; + + return NETDEV_TX_OK; +} + + +/* * Set the IrDA communications speed. */ static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed) @@ -176,6 +235,7 @@ static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed) si->pdata->set_speed(si->dev, speed); si->speed = speed; + si->tx_start = sa1100_irda_sir_tx_start; local_irq_restore(flags); ret = 0; @@ -191,6 +251,7 @@ static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed) Ser2UTCR3 = 0; si->speed = speed; + si->tx_start = sa1100_irda_fir_tx_start; if (si->pdata->set_speed) si->pdata->set_speed(si->dev, speed); @@ -538,66 +599,19 @@ static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) if (speed != si->speed && speed != -1) si->newspeed = speed; - /* - * If this is an empty frame, we can bypass a lot. - */ + /* If this is an empty frame, we can bypass a lot. */ if (skb->len == 0) { sa1100_irda_check_speed(si); dev_kfree_skb(skb); return NETDEV_TX_OK; } - if (!IS_FIR(si)) { - netif_stop_queue(dev); - - si->tx_buff.data = si->tx_buff.head; - si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, - si->tx_buff.truesize); - - /* - * Set the transmit interrupt enable. This will fire - * off an interrupt immediately. Note that we disable - * the receiver so we won't get spurious characteres - * received. - */ - Ser2UTCR3 = UTCR3_TIE | UTCR3_TXE; - - dev_kfree_skb(skb); - } else { - int mtt = irda_get_mtt(skb); - - /* - * We must not be transmitting... - */ - BUG_ON(si->dma_tx.skb); - - netif_stop_queue(dev); - - si->dma_tx.skb = skb; - si->dma_tx.dma = dma_map_single(si->dev, skb->data, - skb->len, DMA_TO_DEVICE); - if (dma_mapping_error(si->dev, si->dma_tx.dma)) { - si->dma_tx.skb = NULL; - netif_wake_queue(dev); - dev->stats.tx_dropped++; - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - - sa1100_start_dma(si->dma_tx.regs, si->dma_tx.dma, skb->len); - - /* - * If we have a mean turn-around time, impose the specified - * specified delay. We could shorten this by timing from - * the point we received the packet. - */ - if (mtt) - udelay(mtt); + netif_stop_queue(dev); - Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE; - } + /* We must not already have a skb to transmit... 
*/ + BUG_ON(si->dma_tx.skb); - return NETDEV_TX_OK; + return si->tx_start(skb, dev, si); } static int -- cgit v0.10.2 From 374f77390ca99b401ee121616524ed32c54d5ad6 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 8 Jan 2012 16:26:15 +0000 Subject: NET: sa11x0-ir: indirect handling of SIR and FIR interrupts Use the same method for doing this as we do for the tx_start functions. Signed-off-by: Russell King diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 8b8cf415..32dee33 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -70,6 +70,7 @@ struct sa1100_irda { iobuff_t rx_buff; int (*tx_start)(struct sk_buff *, struct net_device *, struct sa1100_irda *); + irqreturn_t (*irq)(struct net_device *, struct sa1100_irda *); }; static int sa1100_irda_set_speed(struct sa1100_irda *, int); @@ -197,6 +198,8 @@ static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev, return NETDEV_TX_OK; } +static irqreturn_t sa1100_irda_sir_irq(struct net_device *, struct sa1100_irda *); +static irqreturn_t sa1100_irda_fir_irq(struct net_device *, struct sa1100_irda *); /* * Set the IrDA communications speed. @@ -236,6 +239,7 @@ static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed) si->speed = speed; si->tx_start = sa1100_irda_sir_tx_start; + si->irq = sa1100_irda_sir_irq; local_irq_restore(flags); ret = 0; @@ -252,6 +256,7 @@ static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed) si->speed = speed; si->tx_start = sa1100_irda_fir_tx_start; + si->irq = sa1100_irda_fir_irq; if (si->pdata->set_speed) si->pdata->set_speed(si->dev, speed); @@ -304,9 +309,8 @@ sa1100_set_power(struct sa1100_irda *si, unsigned int state) /* * HP-SIR format interrupt service routines. */ -static void sa1100_irda_hpsir_irq(struct net_device *dev) +static irqreturn_t sa1100_irda_sir_irq(struct net_device *dev, struct sa1100_irda *si) { - struct sa1100_irda *si = netdev_priv(dev); int status; status = Ser2UTSR0; @@ -395,6 +399,8 @@ static void sa1100_irda_hpsir_irq(struct net_device *dev) netif_wake_queue(dev); } } + + return IRQ_HANDLED; } static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev) @@ -475,10 +481,8 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev * * No matter what, we disable RX, process, and the restart RX. */ -static void sa1100_irda_fir_irq(struct net_device *dev) +static irqreturn_t sa1100_irda_fir_irq(struct net_device *dev, struct sa1100_irda *si) { - struct sa1100_irda *si = netdev_priv(dev); - /* * Stop RX DMA */ @@ -520,16 +524,16 @@ static void sa1100_irda_fir_irq(struct net_device *dev) * No matter what happens, we must restart reception. */ sa1100_irda_rx_dma_start(si); + + return IRQ_HANDLED; } static irqreturn_t sa1100_irda_irq(int irq, void *dev_id) { struct net_device *dev = dev_id; - if (IS_FIR(((struct sa1100_irda *)netdev_priv(dev)))) - sa1100_irda_fir_irq(dev); - else - sa1100_irda_hpsir_irq(dev); - return IRQ_HANDLED; + struct sa1100_irda *si = netdev_priv(dev); + + return si->irq(dev, si); } /* @@ -728,10 +732,6 @@ static int sa1100_irda_start(struct net_device *dev) si->speed = 9600; - err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev); - if (err) - goto err_irq; - err = sa1100_request_dma(DMA_Ser2HSSPRd, "IrDA receive", NULL, NULL, &si->dma_rx.regs); if (err) @@ -743,11 +743,6 @@ static int sa1100_irda_start(struct net_device *dev) goto err_tx_dma; /* - * The interrupt must remain disabled for now. 
- */ - disable_irq(dev->irq); - - /* * Setup the serial port for the specified speed. */ err = sa1100_irda_startup(si); @@ -762,15 +757,21 @@ static int sa1100_irda_start(struct net_device *dev) if (!si->irlap) goto err_irlap; + err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev); + if (err) + goto err_irq; + /* * Now enable the interrupt and start the queue */ si->open = 1; sa1100_set_power(si, power_level); /* low power mode */ - enable_irq(dev->irq); + netif_start_queue(dev); return 0; +err_irq: + irlap_close(si->irlap); err_irlap: si->open = 0; sa1100_irda_shutdown(si); @@ -779,8 +780,6 @@ err_startup: err_tx_dma: sa1100_free_dma(si->dma_rx.regs); err_rx_dma: - free_irq(dev->irq, dev); -err_irq: return err; } @@ -789,7 +788,9 @@ static int sa1100_irda_stop(struct net_device *dev) struct sa1100_irda *si = netdev_priv(dev); struct sk_buff *skb; - disable_irq(dev->irq); + netif_stop_queue(dev); + + si->open = 0; sa1100_irda_shutdown(si); /* @@ -818,9 +819,6 @@ static int sa1100_irda_stop(struct net_device *dev) si->irlap = NULL; } - netif_stop_queue(dev); - si->open = 0; - /* * Free resources */ -- cgit v0.10.2 From a6b2ea66d630ad0687a1ac25d5a6afb282bd364a Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 8 Jan 2012 17:10:01 +0000 Subject: NET: sa11x0-ir: move SIR and FIR interrupt support Move the interrupt handlers to the SIR and FIR sections of the file. This improves the localization of the protocol handlers. Signed-off-by: Russell King diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 32dee33..61b42d1 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -164,151 +164,6 @@ static int sa1100_irda_sir_tx_start(struct sk_buff *skb, struct net_device *dev, return NETDEV_TX_OK; } -/* - * FIR format support. - */ -static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev, - struct sa1100_irda *si) -{ - int mtt = irda_get_mtt(skb); - - si->dma_tx.skb = skb; - si->dma_tx.dma = dma_map_single(si->dev, skb->data, skb->len, - DMA_TO_DEVICE); - if (dma_mapping_error(si->dev, si->dma_tx.dma)) { - si->dma_tx.skb = NULL; - netif_wake_queue(dev); - dev->stats.tx_dropped++; - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - - sa1100_start_dma(si->dma_tx.regs, si->dma_tx.dma, skb->len); - - /* - * If we have a mean turn-around time, impose the specified - * specified delay. We could shorten this by timing from - * the point we received the packet. - */ - if (mtt) - udelay(mtt); - - Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE; - - return NETDEV_TX_OK; -} - -static irqreturn_t sa1100_irda_sir_irq(struct net_device *, struct sa1100_irda *); -static irqreturn_t sa1100_irda_fir_irq(struct net_device *, struct sa1100_irda *); - -/* - * Set the IrDA communications speed. - */ -static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed) -{ - unsigned long flags; - int brd, ret = -EINVAL; - - switch (speed) { - case 9600: case 19200: case 38400: - case 57600: case 115200: - brd = 3686400 / (16 * speed) - 1; - - /* - * Stop the receive DMA. 
- */ - if (IS_FIR(si)) - sa1100_stop_dma(si->dma_rx.regs); - - local_irq_save(flags); - - Ser2UTCR3 = 0; - Ser2HSCR0 = HSCR0_UART; - - Ser2UTCR1 = brd >> 8; - Ser2UTCR2 = brd; - - /* - * Clear status register - */ - Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; - Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE; - - if (si->pdata->set_speed) - si->pdata->set_speed(si->dev, speed); - - si->speed = speed; - si->tx_start = sa1100_irda_sir_tx_start; - si->irq = sa1100_irda_sir_irq; - - local_irq_restore(flags); - ret = 0; - break; - - case 4000000: - local_irq_save(flags); - - si->hscr0 = 0; - - Ser2HSSR0 = 0xff; - Ser2HSCR0 = si->hscr0 | HSCR0_HSSP; - Ser2UTCR3 = 0; - - si->speed = speed; - si->tx_start = sa1100_irda_fir_tx_start; - si->irq = sa1100_irda_fir_irq; - - if (si->pdata->set_speed) - si->pdata->set_speed(si->dev, speed); - - sa1100_irda_rx_alloc(si); - sa1100_irda_rx_dma_start(si); - - local_irq_restore(flags); - - break; - - default: - break; - } - - return ret; -} - -/* - * Control the power state of the IrDA transmitter. - * State: - * 0 - off - * 1 - short range, lowest power - * 2 - medium range, medium power - * 3 - maximum range, high power - * - * Currently, only assabet is known to support this. - */ -static int -__sa1100_irda_set_power(struct sa1100_irda *si, unsigned int state) -{ - int ret = 0; - if (si->pdata->set_power) - ret = si->pdata->set_power(si->dev, state); - return ret; -} - -static inline int -sa1100_set_power(struct sa1100_irda *si, unsigned int state) -{ - int ret; - - ret = __sa1100_irda_set_power(si, state); - if (ret == 0) - si->power = state; - - return ret; -} - -/* - * HP-SIR format interrupt service routines. - */ static irqreturn_t sa1100_irda_sir_irq(struct net_device *dev, struct sa1100_irda *si) { int status; @@ -403,6 +258,40 @@ static irqreturn_t sa1100_irda_sir_irq(struct net_device *dev, struct sa1100_ird return IRQ_HANDLED; } +/* + * FIR format support. + */ +static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev, + struct sa1100_irda *si) +{ + int mtt = irda_get_mtt(skb); + + si->dma_tx.skb = skb; + si->dma_tx.dma = dma_map_single(si->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(si->dev, si->dma_tx.dma)) { + si->dma_tx.skb = NULL; + netif_wake_queue(dev); + dev->stats.tx_dropped++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + sa1100_start_dma(si->dma_tx.regs, si->dma_tx.dma, skb->len); + + /* + * If we have a mean turn-around time, impose the specified + * specified delay. We could shorten this by timing from + * the point we received the packet. + */ + if (mtt) + udelay(mtt); + + Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE; + + return NETDEV_TX_OK; +} + static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev) { struct sk_buff *skb = si->dma_rx.skb; @@ -476,10 +365,8 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev } /* - * FIR format interrupt service routine. We only have to - * handle RX events; transmit events go via the TX DMA handler. - * - * No matter what, we disable RX, process, and the restart RX. + * We only have to handle RX events here; transmit events go via the TX + * DMA handler. We disable RX, process, and the restart RX. */ static irqreturn_t sa1100_irda_fir_irq(struct net_device *dev, struct sa1100_irda *si) { @@ -528,6 +415,111 @@ static irqreturn_t sa1100_irda_fir_irq(struct net_device *dev, struct sa1100_ird return IRQ_HANDLED; } +/* + * Set the IrDA communications speed. 
+ */ +static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed) +{ + unsigned long flags; + int brd, ret = -EINVAL; + + switch (speed) { + case 9600: case 19200: case 38400: + case 57600: case 115200: + brd = 3686400 / (16 * speed) - 1; + + /* + * Stop the receive DMA. + */ + if (IS_FIR(si)) + sa1100_stop_dma(si->dma_rx.regs); + + local_irq_save(flags); + + Ser2UTCR3 = 0; + Ser2HSCR0 = HSCR0_UART; + + Ser2UTCR1 = brd >> 8; + Ser2UTCR2 = brd; + + /* + * Clear status register + */ + Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; + Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE; + + if (si->pdata->set_speed) + si->pdata->set_speed(si->dev, speed); + + si->speed = speed; + si->tx_start = sa1100_irda_sir_tx_start; + si->irq = sa1100_irda_sir_irq; + + local_irq_restore(flags); + ret = 0; + break; + + case 4000000: + local_irq_save(flags); + + si->hscr0 = 0; + + Ser2HSSR0 = 0xff; + Ser2HSCR0 = si->hscr0 | HSCR0_HSSP; + Ser2UTCR3 = 0; + + si->speed = speed; + si->tx_start = sa1100_irda_fir_tx_start; + si->irq = sa1100_irda_fir_irq; + + if (si->pdata->set_speed) + si->pdata->set_speed(si->dev, speed); + + sa1100_irda_rx_alloc(si); + sa1100_irda_rx_dma_start(si); + + local_irq_restore(flags); + + break; + + default: + break; + } + + return ret; +} + +/* + * Control the power state of the IrDA transmitter. + * State: + * 0 - off + * 1 - short range, lowest power + * 2 - medium range, medium power + * 3 - maximum range, high power + * + * Currently, only assabet is known to support this. + */ +static int +__sa1100_irda_set_power(struct sa1100_irda *si, unsigned int state) +{ + int ret = 0; + if (si->pdata->set_power) + ret = si->pdata->set_power(si->dev, state); + return ret; +} + +static inline int +sa1100_set_power(struct sa1100_irda *si, unsigned int state) +{ + int ret; + + ret = __sa1100_irda_set_power(si, state); + if (ret == 0) + si->power = state; + + return ret; +} + static irqreturn_t sa1100_irda_irq(int irq, void *dev_id) { struct net_device *dev = dev_id; -- cgit v0.10.2 From 26f2bee1a3063ddd89f76a75b99adb32636f3513 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 8 Jan 2012 17:48:02 +0000 Subject: NET: sa11x0-ir: move sa1100_irda_txdma_irq Move the FIR DMA transmit completion function along-side the other FIR protocol functions. Signed-off-by: Russell King diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 61b42d1..0b5a2e2 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -261,6 +261,57 @@ static irqreturn_t sa1100_irda_sir_irq(struct net_device *dev, struct sa1100_ird /* * FIR format support. */ +static void sa1100_irda_firtxdma_irq(void *id) +{ + struct net_device *dev = id; + struct sa1100_irda *si = netdev_priv(dev); + struct sk_buff *skb; + + /* + * Wait for the transmission to complete. Unfortunately, + * the hardware doesn't give us an interrupt to indicate + * "end of frame". + */ + do + rmb(); + while (!(Ser2HSSR0 & HSSR0_TUR) || Ser2HSSR1 & HSSR1_TBY); + + /* + * Clear the transmit underrun bit. + */ + Ser2HSSR0 = HSSR0_TUR; + + /* + * Do we need to change speed? Note that we're lazy + * here - we don't free the old dma_rx.skb. We don't need + * to allocate a buffer either. + */ + sa1100_irda_check_speed(si); + + /* + * Start reception. This disables the transmitter for + * us. This will be using the existing RX buffer. + */ + sa1100_irda_rx_dma_start(si); + + /* Account and free the packet. 
*/ + skb = si->dma_tx.skb; + if (skb) { + dma_unmap_single(si->dev, si->dma_tx.dma, skb->len, + DMA_TO_DEVICE); + dev->stats.tx_packets ++; + dev->stats.tx_bytes += skb->len; + dev_kfree_skb_irq(skb); + si->dma_tx.skb = NULL; + } + + /* + * Make sure that the TX queue is available for sending + * (for retries). TX has priority over RX at all times. + */ + netif_wake_queue(dev); +} + static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev, struct sa1100_irda *si) { @@ -528,60 +579,6 @@ static irqreturn_t sa1100_irda_irq(int irq, void *dev_id) return si->irq(dev, si); } -/* - * TX DMA completion handler. - */ -static void sa1100_irda_txdma_irq(void *id) -{ - struct net_device *dev = id; - struct sa1100_irda *si = netdev_priv(dev); - struct sk_buff *skb; - - /* - * Wait for the transmission to complete. Unfortunately, - * the hardware doesn't give us an interrupt to indicate - * "end of frame". - */ - do - rmb(); - while (!(Ser2HSSR0 & HSSR0_TUR) || Ser2HSSR1 & HSSR1_TBY); - - /* - * Clear the transmit underrun bit. - */ - Ser2HSSR0 = HSSR0_TUR; - - /* - * Do we need to change speed? Note that we're lazy - * here - we don't free the old dma_rx.skb. We don't need - * to allocate a buffer either. - */ - sa1100_irda_check_speed(si); - - /* - * Start reception. This disables the transmitter for - * us. This will be using the existing RX buffer. - */ - sa1100_irda_rx_dma_start(si); - - /* Account and free the packet. */ - skb = si->dma_tx.skb; - if (skb) { - dma_unmap_single(si->dev, si->dma_tx.dma, skb->len, - DMA_TO_DEVICE); - dev->stats.tx_packets ++; - dev->stats.tx_bytes += skb->len; - dev_kfree_skb_irq(skb); - si->dma_tx.skb = NULL; - } - - /* - * Make sure that the TX queue is available for sending - * (for retries). TX has priority over RX at all times. - */ - netif_wake_queue(dev); -} - static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) { struct sa1100_irda *si = netdev_priv(dev); @@ -730,7 +727,8 @@ static int sa1100_irda_start(struct net_device *dev) goto err_rx_dma; err = sa1100_request_dma(DMA_Ser2HSSPWr, "IrDA transmit", - sa1100_irda_txdma_irq, dev, &si->dma_tx.regs); + sa1100_irda_firtxdma_irq, dev, + &si->dma_tx.regs); if (err) goto err_tx_dma; -- cgit v0.10.2 From 6a7f4911a470fede7d40746487fb1e4a95657efd Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 8 Jan 2012 20:49:28 +0000 Subject: NET: sa11x0-ir: get rid of si->hscr0 si->hscr0 is initialized to zero, and never changed. Get rid of this redundant variable. Signed-off-by: Russell King diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 0b5a2e2..d428dd0 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -50,7 +50,6 @@ struct sa1100_buf { }; struct sa1100_irda { - unsigned char hscr0; unsigned char utcr4; unsigned char power; unsigned char open; @@ -124,14 +123,14 @@ static void sa1100_irda_rx_dma_start(struct sa1100_irda *si) /* * First empty receive FIFO */ - Ser2HSCR0 = si->hscr0 | HSCR0_HSSP; + Ser2HSCR0 = HSCR0_HSSP; /* * Enable the DMA, receiver and receive interrupt. 
*/ sa1100_clear_dma(si->dma_rx.regs); sa1100_start_dma(si->dma_rx.regs, si->dma_rx.dma, HPSIR_MAX_RXLEN); - Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_RXE; + Ser2HSCR0 = HSCR0_HSSP | HSCR0_RXE; } static void sa1100_irda_check_speed(struct sa1100_irda *si) @@ -338,7 +337,7 @@ static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev, if (mtt) udelay(mtt); - Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE; + Ser2HSCR0 = HSCR0_HSSP | HSCR0_TXE; return NETDEV_TX_OK; } @@ -440,7 +439,7 @@ static irqreturn_t sa1100_irda_fir_irq(struct net_device *dev, struct sa1100_ird /* * Clear out the DMA... */ - Ser2HSCR0 = si->hscr0 | HSCR0_HSSP; + Ser2HSCR0 = HSCR0_HSSP; /* * Clear selected status bits now, so we @@ -513,10 +512,8 @@ static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed) case 4000000: local_irq_save(flags); - si->hscr0 = 0; - Ser2HSSR0 = 0xff; - Ser2HSCR0 = si->hscr0 | HSCR0_HSSP; + Ser2HSCR0 = HSCR0_HSSP; Ser2UTCR3 = 0; si->speed = speed; -- cgit v0.10.2 From 32273f50608e9b98116622e32187cbd139c09716 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 12 Jan 2012 12:45:00 +0000 Subject: NET: sa11x0-ir: convert to use scatterlist DMA API Convert the sa11x0 IrDA driver to use the scatterlist DMA API. This is a preparatory patch for converting the driver to use the DMA engine API, which requires a struct scatterlist for every transfer. Signed-off-by: Russell King diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index d428dd0..86d296e 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -45,7 +45,7 @@ static int max_rate = 4000000; struct sa1100_buf { struct sk_buff *skb; - dma_addr_t dma; + struct scatterlist sg; dma_regs_t *regs; }; @@ -98,10 +98,8 @@ static int sa1100_irda_rx_alloc(struct sa1100_irda *si) */ skb_reserve(si->dma_rx.skb, 1); - si->dma_rx.dma = dma_map_single(si->dev, si->dma_rx.skb->data, - HPSIR_MAX_RXLEN, - DMA_FROM_DEVICE); - if (dma_mapping_error(si->dev, si->dma_rx.dma)) { + sg_set_buf(&si->dma_rx.sg, si->dma_rx.skb->data, HPSIR_MAX_RXLEN); + if (dma_map_sg(si->dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE) == 0) { dev_kfree_skb_any(si->dma_rx.skb); return -ENOMEM; } @@ -129,7 +127,8 @@ static void sa1100_irda_rx_dma_start(struct sa1100_irda *si) * Enable the DMA, receiver and receive interrupt. */ sa1100_clear_dma(si->dma_rx.regs); - sa1100_start_dma(si->dma_rx.regs, si->dma_rx.dma, HPSIR_MAX_RXLEN); + sa1100_start_dma(si->dma_rx.regs, sg_dma_address(&si->dma_rx.sg), + sg_dma_len(&si->dma_rx.sg)); Ser2HSCR0 = HSCR0_HSSP | HSCR0_RXE; } @@ -296,8 +295,8 @@ static void sa1100_irda_firtxdma_irq(void *id) /* Account and free the packet. 
*/ skb = si->dma_tx.skb; if (skb) { - dma_unmap_single(si->dev, si->dma_tx.dma, skb->len, - DMA_TO_DEVICE); + dma_unmap_sg(si->dev, &si->dma_tx.sg, 1, + DMA_TO_DEVICE); dev->stats.tx_packets ++; dev->stats.tx_bytes += skb->len; dev_kfree_skb_irq(skb); @@ -317,9 +316,8 @@ static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev, int mtt = irda_get_mtt(skb); si->dma_tx.skb = skb; - si->dma_tx.dma = dma_map_single(si->dev, skb->data, skb->len, - DMA_TO_DEVICE); - if (dma_mapping_error(si->dev, si->dma_tx.dma)) { + sg_set_buf(&si->dma_tx.sg, skb->data, skb->len); + if (dma_map_sg(si->dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) { si->dma_tx.skb = NULL; netif_wake_queue(dev); dev->stats.tx_dropped++; @@ -327,7 +325,8 @@ static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev, return NETDEV_TX_OK; } - sa1100_start_dma(si->dma_tx.regs, si->dma_tx.dma, skb->len); + sa1100_start_dma(si->dma_tx.regs, sg_dma_address(&si->dma_tx.sg), + sg_dma_len(&si->dma_tx.sg)); /* * If we have a mean turn-around time, impose the specified @@ -357,10 +356,10 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev * Get the current data position. */ dma_addr = sa1100_get_dma_pos(si->dma_rx.regs); - len = dma_addr - si->dma_rx.dma; + len = dma_addr - sg_dma_address(&si->dma_rx.sg); if (len > HPSIR_MAX_RXLEN) len = HPSIR_MAX_RXLEN; - dma_unmap_single(si->dev, si->dma_rx.dma, len, DMA_FROM_DEVICE); + dma_unmap_sg(si->dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE); do { /* @@ -408,9 +407,7 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev * Remap the buffer - it was previously mapped, and we * hope that this succeeds. */ - si->dma_rx.dma = dma_map_single(si->dev, si->dma_rx.skb->data, - HPSIR_MAX_RXLEN, - DMA_FROM_DEVICE); + dma_map_sg(si->dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE); } } @@ -786,16 +783,16 @@ static int sa1100_irda_stop(struct net_device *dev) */ skb = si->dma_rx.skb; if (skb) { - dma_unmap_single(si->dev, si->dma_rx.dma, HPSIR_MAX_RXLEN, - DMA_FROM_DEVICE); + dma_unmap_sg(si->dev, &si->dma_rx.sg, 1, + DMA_FROM_DEVICE); dev_kfree_skb(skb); si->dma_rx.skb = NULL; } skb = si->dma_tx.skb; if (skb) { - dma_unmap_single(si->dev, si->dma_tx.dma, skb->len, - DMA_TO_DEVICE); + dma_unmap_sg(si->dev, &si->dma_tx.sg, 1, + DMA_TO_DEVICE); dev_kfree_skb(skb); si->dma_tx.skb = NULL; } @@ -871,6 +868,9 @@ static int sa1100_irda_probe(struct platform_device *pdev) si->dev = &pdev->dev; si->pdata = pdev->dev.platform_data; + sg_init_table(&si->dma_rx.sg, 1); + sg_init_table(&si->dma_tx.sg, 1); + /* * Initialise the HP-SIR buffers */ -- cgit v0.10.2 From 04b7fc4dec4fcb61dbe022bbaffda8ea37c39430 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 12 Jan 2012 13:51:10 +0000 Subject: NET: sa11x0-ir: fix size of SIR transmit buffer The SIR transmit buffer was being allocated as 4000 bytes. IrDA now has constants for the buffer sizes, and defines the maximum wrapped SIR packet to be 4269 bytes as indicated by IRDA_SIR_MAX_FRAME. Use this definition to allocate the transmit buffer instead. 
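For context, the helper behind this allocation looks roughly like the following (a sketch of the driver's iobuf setup using the generic IrDA iobuff_t fields; the exact body in the tree may differ):

	/* Sketch of the iobuf helper; field names from iobuff_t, body may differ */
	static int sa1100_irda_init_iobuf(iobuff_t *io, int size)
	{
		io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
		if (io->head != NULL) {
			io->truesize = size;
			io->in_frame = FALSE;
			io->state    = OUTSIDE_FRAME;
			io->data     = io->head;
		}
		return io->head ? 0 : -ENOMEM;
	}

Sizing the buffer to IRDA_SIR_MAX_FRAME matters because async_wrap_skb() byte-stuffs the payload when wrapping it for SIR transmission, so a worst-case wrapped frame is considerably larger than the raw skb; with only 4000 bytes available, a maximally escaped frame would not fit.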
Signed-off-by: Russell King diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 86d296e..be67bdc 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -877,7 +877,7 @@ static int sa1100_irda_probe(struct platform_device *pdev) err = sa1100_irda_init_iobuf(&si->rx_buff, 14384); if (err) goto err_mem_5; - err = sa1100_irda_init_iobuf(&si->tx_buff, 4000); + err = sa1100_irda_init_iobuf(&si->tx_buff, IRDA_SIR_MAX_FRAME); if (err) goto err_mem_5; -- cgit v0.10.2 From 3c500a35544d6270b127bce7d4c5a15ef454b9e2 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 12 Jan 2012 13:56:28 +0000 Subject: NET: sa11x0-ir: split si->dev for IrDA transmit and receive buffers The sa11x0-ir device is not the device which is doing the DMA, the DMA is being performed by a separate DMA engine. Split the struct device associated with each DMA channel from the main struct device, but for the time being initialize it from the main struct device. This is another preparatory step to converting this driver to use the DMA engine API. Signed-off-by: Russell King diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index be67bdc..9c748f3 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -44,6 +44,7 @@ static int tx_lpm; static int max_rate = 4000000; struct sa1100_buf { + struct device *dev; struct sk_buff *skb; struct scatterlist sg; dma_regs_t *regs; @@ -99,7 +100,7 @@ static int sa1100_irda_rx_alloc(struct sa1100_irda *si) skb_reserve(si->dma_rx.skb, 1); sg_set_buf(&si->dma_rx.sg, si->dma_rx.skb->data, HPSIR_MAX_RXLEN); - if (dma_map_sg(si->dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE) == 0) { + if (dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE) == 0) { dev_kfree_skb_any(si->dma_rx.skb); return -ENOMEM; } @@ -295,7 +296,7 @@ static void sa1100_irda_firtxdma_irq(void *id) /* Account and free the packet. */ skb = si->dma_tx.skb; if (skb) { - dma_unmap_sg(si->dev, &si->dma_tx.sg, 1, + dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE); dev->stats.tx_packets ++; dev->stats.tx_bytes += skb->len; @@ -317,7 +318,7 @@ static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev, si->dma_tx.skb = skb; sg_set_buf(&si->dma_tx.sg, skb->data, skb->len); - if (dma_map_sg(si->dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) { + if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) { si->dma_tx.skb = NULL; netif_wake_queue(dev); dev->stats.tx_dropped++; @@ -359,7 +360,7 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev len = dma_addr - sg_dma_address(&si->dma_rx.sg); if (len > HPSIR_MAX_RXLEN) len = HPSIR_MAX_RXLEN; - dma_unmap_sg(si->dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE); + dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE); do { /* @@ -407,7 +408,7 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev * Remap the buffer - it was previously mapped, and we * hope that this succeeds. */ - dma_map_sg(si->dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE); + dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE); } } @@ -726,6 +727,9 @@ static int sa1100_irda_start(struct net_device *dev) if (err) goto err_tx_dma; + si->dma_rx.dev = si->dev; + si->dma_tx.dev = si->dev; + /* * Setup the serial port for the specified speed. 
*/ @@ -783,7 +787,7 @@ static int sa1100_irda_stop(struct net_device *dev) */ skb = si->dma_rx.skb; if (skb) { - dma_unmap_sg(si->dev, &si->dma_rx.sg, 1, + dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE); dev_kfree_skb(skb); si->dma_rx.skb = NULL; @@ -791,7 +795,7 @@ static int sa1100_irda_stop(struct net_device *dev) skb = si->dma_tx.skb; if (skb) { - dma_unmap_sg(si->dev, &si->dma_tx.sg, 1, + dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE); dev_kfree_skb(skb); si->dma_tx.skb = NULL; -- cgit v0.10.2 From 6365bead25efc84a4cf4aa9b0a7638f8a970cdff Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 9 Jan 2012 21:44:07 +0000 Subject: DMA: sa11x0: add SA-11x0 DMA driver Add support for the SA-11x0 DMA driver, which replaces the private API version in arch/arm/mach-sa1100/dma.c. We model this as a set of virtual DMA channels, one for each request signal, and assign the virtual DMA channel to a physical DMA channel when there is work to be done. This allows DMA users to claim their channels, and hold them while not in use, without affecting the availability of the physical channels. Another advantage over this approach, compared to the private version, is that a channel can be reconfigured on the fly without having to release and re-request it - which for the IrDA driver, allows us to use DMA for SIR mode transmit without eating up three physical channels. As IrDA is half-duplex, we actually only need one physical channel, and this architecture allows us to achieve that. Signed-off-by: Russell King diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index f1a2749..4a6c46d 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -252,6 +252,15 @@ config EP93XX_DMA help Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller. +config DMA_SA11X0 + tristate "SA-11x0 DMA support" + depends on ARCH_SA1100 + select DMA_ENGINE + help + Support the DMA engine found on Intel StrongARM SA-1100 and + SA-1110 SoCs. This DMA engine can only be used with on-chip + devices. + config DMA_ENGINE bool diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 009a222..86b795b 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -27,3 +27,4 @@ obj-$(CONFIG_PL330_DMA) += pl330.o obj-$(CONFIG_PCH_DMA) += pch_dma.o obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o +obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c new file mode 100644 index 0000000..16a6b48 --- /dev/null +++ b/drivers/dma/sa11x0-dma.c @@ -0,0 +1,1109 @@ +/* + * SA11x0 DMAengine support + * + * Copyright (C) 2012 Russell King + * Derived in part from arch/arm/mach-sa1100/dma.c, + * Copyright (C) 2000, 2001 by Nicolas Pitre + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define NR_PHY_CHAN 6 +#define DMA_ALIGN 3 +#define DMA_MAX_SIZE 0x1fff +#define DMA_CHUNK_SIZE 0x1000 + +#define DMA_DDAR 0x00 +#define DMA_DCSR_S 0x04 +#define DMA_DCSR_C 0x08 +#define DMA_DCSR_R 0x0c +#define DMA_DBSA 0x10 +#define DMA_DBTA 0x14 +#define DMA_DBSB 0x18 +#define DMA_DBTB 0x1c +#define DMA_SIZE 0x20 + +#define DCSR_RUN (1 << 0) +#define DCSR_IE (1 << 1) +#define DCSR_ERROR (1 << 2) +#define DCSR_DONEA (1 << 3) +#define DCSR_STRTA (1 << 4) +#define DCSR_DONEB (1 << 5) +#define DCSR_STRTB (1 << 6) +#define DCSR_BIU (1 << 7) + +#define DDAR_RW (1 << 0) /* 0 = W, 1 = R */ +#define DDAR_E (1 << 1) /* 0 = LE, 1 = BE */ +#define DDAR_BS (1 << 2) /* 0 = BS4, 1 = BS8 */ +#define DDAR_DW (1 << 3) /* 0 = 8b, 1 = 16b */ +#define DDAR_Ser0UDCTr (0x0 << 4) +#define DDAR_Ser0UDCRc (0x1 << 4) +#define DDAR_Ser1SDLCTr (0x2 << 4) +#define DDAR_Ser1SDLCRc (0x3 << 4) +#define DDAR_Ser1UARTTr (0x4 << 4) +#define DDAR_Ser1UARTRc (0x5 << 4) +#define DDAR_Ser2ICPTr (0x6 << 4) +#define DDAR_Ser2ICPRc (0x7 << 4) +#define DDAR_Ser3UARTTr (0x8 << 4) +#define DDAR_Ser3UARTRc (0x9 << 4) +#define DDAR_Ser4MCP0Tr (0xa << 4) +#define DDAR_Ser4MCP0Rc (0xb << 4) +#define DDAR_Ser4MCP1Tr (0xc << 4) +#define DDAR_Ser4MCP1Rc (0xd << 4) +#define DDAR_Ser4SSPTr (0xe << 4) +#define DDAR_Ser4SSPRc (0xf << 4) + +struct sa11x0_dma_sg { + u32 addr; + u32 len; +}; + +struct sa11x0_dma_desc { + struct dma_async_tx_descriptor tx; + u32 ddar; + size_t size; + + /* maybe protected by c->lock */ + struct list_head node; + unsigned sglen; + struct sa11x0_dma_sg sg[0]; +}; + +struct sa11x0_dma_phy; + +struct sa11x0_dma_chan { + struct dma_chan chan; + spinlock_t lock; + dma_cookie_t lc; + + /* protected by c->lock */ + struct sa11x0_dma_phy *phy; + enum dma_status status; + struct list_head desc_submitted; + struct list_head desc_issued; + + /* protected by d->lock */ + struct list_head node; + + u32 ddar; + const char *name; +}; + +struct sa11x0_dma_phy { + void __iomem *base; + struct sa11x0_dma_dev *dev; + unsigned num; + + struct sa11x0_dma_chan *vchan; + + /* Protected by c->lock */ + unsigned sg_load; + struct sa11x0_dma_desc *txd_load; + unsigned sg_done; + struct sa11x0_dma_desc *txd_done; +#ifdef CONFIG_PM_SLEEP + u32 dbs[2]; + u32 dbt[2]; + u32 dcsr; +#endif +}; + +struct sa11x0_dma_dev { + struct dma_device slave; + void __iomem *base; + spinlock_t lock; + struct tasklet_struct task; + struct list_head chan_pending; + struct list_head desc_complete; + struct sa11x0_dma_phy phy[NR_PHY_CHAN]; +}; + +static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct sa11x0_dma_chan, chan); +} + +static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) +{ + return container_of(dmadev, struct sa11x0_dma_dev, slave); +} + +static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx) +{ + return container_of(tx, struct sa11x0_dma_desc, tx); +} + +static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) +{ + if (list_empty(&c->desc_issued)) + return NULL; + + return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node); +} + +static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd) +{ + list_del(&txd->node); + p->txd_load = txd; + p->sg_load = 0; + + dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n", + p->num, txd, txd->tx.cookie, txd->ddar); 
+} + +static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, + struct sa11x0_dma_chan *c) +{ + struct sa11x0_dma_desc *txd = p->txd_load; + struct sa11x0_dma_sg *sg; + void __iomem *base = p->base; + unsigned dbsx, dbtx; + u32 dcsr; + + if (!txd) + return; + + dcsr = readl_relaxed(base + DMA_DCSR_R); + + /* Don't try to load the next transfer if both buffers are started */ + if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB)) + return; + + if (p->sg_load == txd->sglen) { + struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); + + /* + * We have reached the end of the current descriptor. + * Peek at the next descriptor, and if compatible with + * the current, start processing it. + */ + if (txn && txn->ddar == txd->ddar) { + txd = txn; + sa11x0_dma_start_desc(p, txn); + } else { + p->txd_load = NULL; + return; + } + } + + sg = &txd->sg[p->sg_load++]; + + /* Select buffer to load according to channel status */ + if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) || + ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) { + dbsx = DMA_DBSA; + dbtx = DMA_DBTA; + dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN; + } else { + dbsx = DMA_DBSB; + dbtx = DMA_DBTB; + dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN; + } + + writel_relaxed(sg->addr, base + dbsx); + writel_relaxed(sg->len, base + dbtx); + writel(dcsr, base + DMA_DCSR_S); + + dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n", + p->num, dcsr, + 'A' + (dbsx == DMA_DBSB), sg->addr, + 'A' + (dbtx == DMA_DBTB), sg->len); +} + +static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p, + struct sa11x0_dma_chan *c) +{ + struct sa11x0_dma_desc *txd = p->txd_done; + + if (++p->sg_done == txd->sglen) { + struct sa11x0_dma_dev *d = p->dev; + + dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n", + p->num, p->txd_done, p->txd_done->tx.cookie); + + c->lc = txd->tx.cookie; + + spin_lock(&d->lock); + list_add_tail(&txd->node, &d->desc_complete); + spin_unlock(&d->lock); + + p->sg_done = 0; + p->txd_done = p->txd_load; + + tasklet_schedule(&d->task); + } + + sa11x0_dma_start_sg(p, c); +} + +static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id) +{ + struct sa11x0_dma_phy *p = dev_id; + struct sa11x0_dma_dev *d = p->dev; + struct sa11x0_dma_chan *c; + u32 dcsr; + + dcsr = readl_relaxed(p->base + DMA_DCSR_R); + if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB))) + return IRQ_NONE; + + /* Clear reported status bits */ + writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB), + p->base + DMA_DCSR_C); + + dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr); + + if (dcsr & DCSR_ERROR) { + dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n", + p->num, dcsr, + readl_relaxed(p->base + DMA_DDAR), + readl_relaxed(p->base + DMA_DBSA), + readl_relaxed(p->base + DMA_DBTA), + readl_relaxed(p->base + DMA_DBSB), + readl_relaxed(p->base + DMA_DBTB)); + } + + c = p->vchan; + if (c) { + unsigned long flags; + + spin_lock_irqsave(&c->lock, flags); + /* + * Now that we're holding the lock, check that the vchan + * really is associated with this pchan before touching the + * hardware. This should always succeed, because we won't + * change p->vchan or c->phy while the channel is actively + * transferring. 
+ */ + if (c->phy == p) { + if (dcsr & DCSR_DONEA) + sa11x0_dma_complete(p, c); + if (dcsr & DCSR_DONEB) + sa11x0_dma_complete(p, c); + } + spin_unlock_irqrestore(&c->lock, flags); + } + + return IRQ_HANDLED; +} + +static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c) +{ + struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c); + + /* If the issued list is empty, we have no further txds to process */ + if (txd) { + struct sa11x0_dma_phy *p = c->phy; + + sa11x0_dma_start_desc(p, txd); + p->txd_done = txd; + p->sg_done = 0; + + /* The channel should not have any transfers started */ + WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) & + (DCSR_STRTA | DCSR_STRTB)); + + /* Clear the run and start bits before changing DDAR */ + writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB, + p->base + DMA_DCSR_C); + writel_relaxed(txd->ddar, p->base + DMA_DDAR); + + /* Try to start both buffers */ + sa11x0_dma_start_sg(p, c); + sa11x0_dma_start_sg(p, c); + } +} + +static void sa11x0_dma_tasklet(unsigned long arg) +{ + struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg; + struct sa11x0_dma_phy *p; + struct sa11x0_dma_chan *c; + struct sa11x0_dma_desc *txd, *txn; + LIST_HEAD(head); + unsigned pch, pch_alloc = 0; + + dev_dbg(d->slave.dev, "tasklet enter\n"); + + /* Get the completed tx descriptors */ + spin_lock_irq(&d->lock); + list_splice_init(&d->desc_complete, &head); + spin_unlock_irq(&d->lock); + + list_for_each_entry(txd, &head, node) { + c = to_sa11x0_dma_chan(txd->tx.chan); + + dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n", + c, txd, txd->tx.cookie); + + spin_lock_irq(&c->lock); + p = c->phy; + if (p) { + if (!p->txd_done) + sa11x0_dma_start_txd(c); + if (!p->txd_done) { + /* No current txd associated with this channel */ + dev_dbg(d->slave.dev, "pchan %u: free\n", p->num); + + /* Mark this channel free */ + c->phy = NULL; + p->vchan = NULL; + } + } + spin_unlock_irq(&c->lock); + } + + spin_lock_irq(&d->lock); + for (pch = 0; pch < NR_PHY_CHAN; pch++) { + p = &d->phy[pch]; + + if (p->vchan == NULL && !list_empty(&d->chan_pending)) { + c = list_first_entry(&d->chan_pending, + struct sa11x0_dma_chan, node); + list_del_init(&c->node); + + pch_alloc |= 1 << pch; + + /* Mark this channel allocated */ + p->vchan = c; + + dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c); + } + } + spin_unlock_irq(&d->lock); + + for (pch = 0; pch < NR_PHY_CHAN; pch++) { + if (pch_alloc & (1 << pch)) { + p = &d->phy[pch]; + c = p->vchan; + + spin_lock_irq(&c->lock); + c->phy = p; + + sa11x0_dma_start_txd(c); + spin_unlock_irq(&c->lock); + } + } + + /* Now free the completed tx descriptors and call their callbacks */ + list_for_each_entry_safe(txd, txn, &head, node) { + dma_async_tx_callback callback = txd->tx.callback; + void *callback_param = txd->tx.callback_param; + + dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n", + txd, txd->tx.cookie); + + kfree(txd); + + if (callback) + callback(callback_param); + } + + dev_dbg(d->slave.dev, "tasklet exit\n"); +} + + +static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head) +{ + struct sa11x0_dma_desc *txd, *txn; + + list_for_each_entry_safe(txd, txn, head, node) { + dev_dbg(d->slave.dev, "txd %p: freeing\n", txd); + kfree(txd); + } +} + +static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan) +{ + return 0; +} + +static void sa11x0_dma_free_chan_resources(struct dma_chan *chan) +{ + struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); + struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); + unsigned long
flags; + LIST_HEAD(head); + + spin_lock_irqsave(&c->lock, flags); + spin_lock(&d->lock); + list_del_init(&c->node); + spin_unlock(&d->lock); + + list_splice_tail_init(&c->desc_submitted, &head); + list_splice_tail_init(&c->desc_issued, &head); + spin_unlock_irqrestore(&c->lock, flags); + + sa11x0_dma_desc_free(d, &head); +} + +static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p) +{ + unsigned reg; + u32 dcsr; + + dcsr = readl_relaxed(p->base + DMA_DCSR_R); + + if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA || + (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU) + reg = DMA_DBSA; + else + reg = DMA_DBSB; + + return readl_relaxed(p->base + reg); +} + +static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *state) +{ + struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); + struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); + struct sa11x0_dma_phy *p; + struct sa11x0_dma_desc *txd; + dma_cookie_t last_used, last_complete; + unsigned long flags; + enum dma_status ret; + size_t bytes = 0; + + last_used = c->chan.cookie; + last_complete = c->lc; + + ret = dma_async_is_complete(cookie, last_complete, last_used); + if (ret == DMA_SUCCESS) { + dma_set_tx_state(state, last_complete, last_used, 0); + return ret; + } + + spin_lock_irqsave(&c->lock, flags); + p = c->phy; + ret = c->status; + if (p) { + dma_addr_t addr = sa11x0_dma_pos(p); + + dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); + + txd = p->txd_done; + if (txd) { + unsigned i; + + for (i = 0; i < txd->sglen; i++) { + dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n", + i, txd->sg[i].addr, txd->sg[i].len); + if (addr >= txd->sg[i].addr && + addr < txd->sg[i].addr + txd->sg[i].len) { + unsigned len; + + len = txd->sg[i].len - + (addr - txd->sg[i].addr); + dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n", + i, len); + bytes += len; + i++; + break; + } + } + for (; i < txd->sglen; i++) { + dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n", + i, txd->sg[i].addr, txd->sg[i].len); + bytes += txd->sg[i].len; + } + } + if (txd != p->txd_load && p->txd_load) + bytes += p->txd_load->size; + } + list_for_each_entry(txd, &c->desc_issued, node) { + bytes += txd->size; + } + spin_unlock_irqrestore(&c->lock, flags); + + dma_set_tx_state(state, last_complete, last_used, bytes); + + dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes); + + return ret; +} + +/* + * Move pending txds to the issued list, and re-init pending list. + * If not already pending, add this channel to the list of pending + * channels and trigger the tasklet to run. 
+ */ +static void sa11x0_dma_issue_pending(struct dma_chan *chan) +{ + struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); + struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); + unsigned long flags; + + spin_lock_irqsave(&c->lock, flags); + list_splice_tail_init(&c->desc_submitted, &c->desc_issued); + if (!list_empty(&c->desc_issued)) { + spin_lock(&d->lock); + if (!c->phy && list_empty(&c->node)) { + list_add_tail(&c->node, &d->chan_pending); + tasklet_schedule(&d->task); + dev_dbg(d->slave.dev, "vchan %p: issued\n", c); + } + spin_unlock(&d->lock); + } else + dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c); + spin_unlock_irqrestore(&c->lock, flags); +} + +static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan); + struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx); + unsigned long flags; + + spin_lock_irqsave(&c->lock, flags); + c->chan.cookie += 1; + if (c->chan.cookie < 0) + c->chan.cookie = 1; + txd->tx.cookie = c->chan.cookie; + + list_add_tail(&txd->node, &c->desc_submitted); + spin_unlock_irqrestore(&c->lock, flags); + + dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n", + c, txd, txd->tx.cookie); + + return txd->tx.cookie; +} + +static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen, + enum dma_transfer_direction dir, unsigned long flags) +{ + struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); + struct sa11x0_dma_desc *txd; + struct scatterlist *sgent; + unsigned i, j = sglen; + size_t size = 0; + + /* SA11x0 channels can only operate in their native direction */ + if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { + dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", + c, c->ddar, dir); + return NULL; + } + + /* Do not allow zero-sized txds */ + if (sglen == 0) + return NULL; + + for_each_sg(sg, sgent, sglen, i) { + dma_addr_t addr = sg_dma_address(sgent); + unsigned int len = sg_dma_len(sgent); + + if (len > DMA_MAX_SIZE) + j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1; + if (addr & DMA_ALIGN) { + dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n", + c, addr); + return NULL; + } + } + + txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC); + if (!txd) { + dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c); + return NULL; + } + + j = 0; + for_each_sg(sg, sgent, sglen, i) { + dma_addr_t addr = sg_dma_address(sgent); + unsigned len = sg_dma_len(sgent); + + size += len; + + do { + unsigned tlen = len; + + /* + * Check whether the transfer will fit. If not, try + * to split the transfer up such that we end up with + * equal chunks - but make sure that we preserve the + * alignment. This avoids small segments. 
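+ * For example, a 0x2000 byte segment exceeds DMA_MAX_SIZE (0x1fff), so + * mult = DIV_ROUND_UP(0x2000, 0x1ffc) = 2 and tlen = (0x2000 / 2) & ~DMA_ALIGN + * = 0x1000, giving two equal, word-aligned chunks.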
+ */ + if (tlen > DMA_MAX_SIZE) { + unsigned mult = DIV_ROUND_UP(tlen, + DMA_MAX_SIZE & ~DMA_ALIGN); + + tlen = (tlen / mult) & ~DMA_ALIGN; + } + + txd->sg[j].addr = addr; + txd->sg[j].len = tlen; + + addr += tlen; + len -= tlen; + j++; + } while (len); + } + + dma_async_tx_descriptor_init(&txd->tx, &c->chan); + txd->tx.flags = flags; + txd->tx.tx_submit = sa11x0_dma_tx_submit; + txd->ddar = c->ddar; + txd->size = size; + txd->sglen = j; + + dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n", + c, txd, txd->size, txd->sglen); + + return &txd->tx; +} + +static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg) +{ + u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW); + dma_addr_t addr; + enum dma_slave_buswidth width; + u32 maxburst; + + if (ddar & DDAR_RW) { + addr = cfg->src_addr; + width = cfg->src_addr_width; + maxburst = cfg->src_maxburst; + } else { + addr = cfg->dst_addr; + width = cfg->dst_addr_width; + maxburst = cfg->dst_maxburst; + } + + if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE && + width != DMA_SLAVE_BUSWIDTH_2_BYTES) || + (maxburst != 4 && maxburst != 8)) + return -EINVAL; + + if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) + ddar |= DDAR_DW; + if (maxburst == 8) + ddar |= DDAR_BS; + + dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n", + c, addr, width, maxburst); + + c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6; + + return 0; +} + +static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); + struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); + struct sa11x0_dma_phy *p; + LIST_HEAD(head); + unsigned long flags; + int ret; + + switch (cmd) { + case DMA_SLAVE_CONFIG: + return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg); + + case DMA_TERMINATE_ALL: + dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c); + /* Clear the tx descriptor lists */ + spin_lock_irqsave(&c->lock, flags); + list_splice_tail_init(&c->desc_submitted, &head); + list_splice_tail_init(&c->desc_issued, &head); + + p = c->phy; + if (p) { + struct sa11x0_dma_desc *txd, *txn; + + dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num); + /* vchan is assigned to a pchan - stop the channel */ + writel(DCSR_RUN | DCSR_IE | + DCSR_STRTA | DCSR_DONEA | + DCSR_STRTB | DCSR_DONEB, + p->base + DMA_DCSR_C); + + list_for_each_entry_safe(txd, txn, &d->desc_complete, node) + if (txd->tx.chan == &c->chan) + list_move(&txd->node, &head); + + if (p->txd_load) { + if (p->txd_load != p->txd_done) + list_add_tail(&p->txd_load->node, &head); + p->txd_load = NULL; + } + if (p->txd_done) { + list_add_tail(&p->txd_done->node, &head); + p->txd_done = NULL; + } + c->phy = NULL; + spin_lock(&d->lock); + p->vchan = NULL; + spin_unlock(&d->lock); + tasklet_schedule(&d->task); + } + spin_unlock_irqrestore(&c->lock, flags); + sa11x0_dma_desc_free(d, &head); + ret = 0; + break; + + case DMA_PAUSE: + dev_dbg(d->slave.dev, "vchan %p: pause\n", c); + spin_lock_irqsave(&c->lock, flags); + if (c->status == DMA_IN_PROGRESS) { + c->status = DMA_PAUSED; + + p = c->phy; + if (p) { + writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C); + } else { + spin_lock(&d->lock); + list_del_init(&c->node); + spin_unlock(&d->lock); + } + } + spin_unlock_irqrestore(&c->lock, flags); + ret = 0; + break; + + case DMA_RESUME: + dev_dbg(d->slave.dev, "vchan %p: resume\n", c); + spin_lock_irqsave(&c->lock, flags); + if (c->status == DMA_PAUSED) { + c->status = DMA_IN_PROGRESS; 
+ + p = c->phy; + if (p) { + writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S); + } else if (!list_empty(&c->desc_issued)) { + spin_lock(&d->lock); + list_add_tail(&c->node, &d->chan_pending); + spin_unlock(&d->lock); + } + } + spin_unlock_irqrestore(&c->lock, flags); + ret = 0; + break; + + default: + ret = -ENXIO; + break; + } + + return ret; +} + +struct sa11x0_dma_channel_desc { + u32 ddar; + const char *name; +}; + +#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 } +static const struct sa11x0_dma_channel_desc chan_desc[] = { + CD(Ser0UDCTr, 0), + CD(Ser0UDCRc, DDAR_RW), + CD(Ser1SDLCTr, 0), + CD(Ser1SDLCRc, DDAR_RW), + CD(Ser1UARTTr, 0), + CD(Ser1UARTRc, DDAR_RW), + CD(Ser2ICPTr, 0), + CD(Ser2ICPRc, DDAR_RW), + CD(Ser3UARTTr, 0), + CD(Ser3UARTRc, DDAR_RW), + CD(Ser4MCP0Tr, 0), + CD(Ser4MCP0Rc, DDAR_RW), + CD(Ser4MCP1Tr, 0), + CD(Ser4MCP1Rc, DDAR_RW), + CD(Ser4SSPTr, 0), + CD(Ser4SSPRc, DDAR_RW), +}; + +static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev, + struct device *dev) +{ + unsigned i; + + dmadev->chancnt = ARRAY_SIZE(chan_desc); + INIT_LIST_HEAD(&dmadev->channels); + dmadev->dev = dev; + dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources; + dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources; + dmadev->device_control = sa11x0_dma_control; + dmadev->device_tx_status = sa11x0_dma_tx_status; + dmadev->device_issue_pending = sa11x0_dma_issue_pending; + + for (i = 0; i < dmadev->chancnt; i++) { + struct sa11x0_dma_chan *c; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) { + dev_err(dev, "no memory for channel %u\n", i); + return -ENOMEM; + } + + c->chan.device = dmadev; + c->status = DMA_IN_PROGRESS; + c->ddar = chan_desc[i].ddar; + c->name = chan_desc[i].name; + spin_lock_init(&c->lock); + INIT_LIST_HEAD(&c->desc_submitted); + INIT_LIST_HEAD(&c->desc_issued); + INIT_LIST_HEAD(&c->node); + list_add_tail(&c->chan.device_node, &dmadev->channels); + } + + return dma_async_device_register(dmadev); +} + +static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr, + void *data) +{ + int irq = platform_get_irq(pdev, nr); + + if (irq <= 0) + return -ENXIO; + + return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data); +} + +static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr, + void *data) +{ + int irq = platform_get_irq(pdev, nr); + if (irq > 0) + free_irq(irq, data); +} + +static void sa11x0_dma_free_channels(struct dma_device *dmadev) +{ + struct sa11x0_dma_chan *c, *cn; + + list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) { + list_del(&c->chan.device_node); + kfree(c); + } +} + +static int __devinit sa11x0_dma_probe(struct platform_device *pdev) +{ + struct sa11x0_dma_dev *d; + struct resource *res; + unsigned i; + int ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENXIO; + + d = kzalloc(sizeof(*d), GFP_KERNEL); + if (!d) { + ret = -ENOMEM; + goto err_alloc; + } + + spin_lock_init(&d->lock); + INIT_LIST_HEAD(&d->chan_pending); + INIT_LIST_HEAD(&d->desc_complete); + + d->base = ioremap(res->start, resource_size(res)); + if (!d->base) { + ret = -ENOMEM; + goto err_ioremap; + } + + tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d); + + for (i = 0; i < NR_PHY_CHAN; i++) { + struct sa11x0_dma_phy *p = &d->phy[i]; + + p->dev = d; + p->num = i; + p->base = d->base + i * DMA_SIZE; + writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR | + DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB, + p->base + DMA_DCSR_C); + 
writel_relaxed(0, p->base + DMA_DDAR); + + ret = sa11x0_dma_request_irq(pdev, i, p); + if (ret) { + while (i) { + i--; + sa11x0_dma_free_irq(pdev, i, &d->phy[i]); + } + goto err_irq; + } + } + + dma_cap_set(DMA_SLAVE, d->slave.cap_mask); + d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg; + ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev); + if (ret) { + dev_warn(d->slave.dev, "failed to register slave async device: %d\n", + ret); + goto err_slave_reg; + } + + platform_set_drvdata(pdev, d); + return 0; + + err_slave_reg: + sa11x0_dma_free_channels(&d->slave); + for (i = 0; i < NR_PHY_CHAN; i++) + sa11x0_dma_free_irq(pdev, i, &d->phy[i]); + err_irq: + tasklet_kill(&d->task); + iounmap(d->base); + err_ioremap: + kfree(d); + err_alloc: + return ret; +} + +static int __devexit sa11x0_dma_remove(struct platform_device *pdev) +{ + struct sa11x0_dma_dev *d = platform_get_drvdata(pdev); + unsigned pch; + + dma_async_device_unregister(&d->slave); + + sa11x0_dma_free_channels(&d->slave); + for (pch = 0; pch < NR_PHY_CHAN; pch++) + sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]); + tasklet_kill(&d->task); + iounmap(d->base); + kfree(d); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sa11x0_dma_suspend(struct device *dev) +{ + struct sa11x0_dma_dev *d = dev_get_drvdata(dev); + unsigned pch; + + for (pch = 0; pch < NR_PHY_CHAN; pch++) { + struct sa11x0_dma_phy *p = &d->phy[pch]; + u32 dcsr, saved_dcsr; + + dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R); + if (dcsr & DCSR_RUN) { + writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C); + dcsr = readl_relaxed(p->base + DMA_DCSR_R); + } + + saved_dcsr &= DCSR_RUN | DCSR_IE; + if (dcsr & DCSR_BIU) { + p->dbs[0] = readl_relaxed(p->base + DMA_DBSB); + p->dbt[0] = readl_relaxed(p->base + DMA_DBTB); + p->dbs[1] = readl_relaxed(p->base + DMA_DBSA); + p->dbt[1] = readl_relaxed(p->base + DMA_DBTA); + saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) | + (dcsr & DCSR_STRTB ? 
DCSR_STRTA : 0); + } else { + p->dbs[0] = readl_relaxed(p->base + DMA_DBSA); + p->dbt[0] = readl_relaxed(p->base + DMA_DBTA); + p->dbs[1] = readl_relaxed(p->base + DMA_DBSB); + p->dbt[1] = readl_relaxed(p->base + DMA_DBTB); + saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB); + } + p->dcsr = saved_dcsr; + + writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C); + } + + return 0; +} + +static int sa11x0_dma_resume(struct device *dev) +{ + struct sa11x0_dma_dev *d = dev_get_drvdata(dev); + unsigned pch; + + for (pch = 0; pch < NR_PHY_CHAN; pch++) { + struct sa11x0_dma_phy *p = &d->phy[pch]; + struct sa11x0_dma_desc *txd = NULL; + u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R); + + WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN)); + + if (p->txd_done) + txd = p->txd_done; + else if (p->txd_load) + txd = p->txd_load; + + if (!txd) + continue; + + writel_relaxed(txd->ddar, p->base + DMA_DDAR); + + writel_relaxed(p->dbs[0], p->base + DMA_DBSA); + writel_relaxed(p->dbt[0], p->base + DMA_DBTA); + writel_relaxed(p->dbs[1], p->base + DMA_DBSB); + writel_relaxed(p->dbt[1], p->base + DMA_DBTB); + writel_relaxed(p->dcsr, p->base + DMA_DCSR_S); + } + + return 0; +} +#endif + +static const struct dev_pm_ops sa11x0_dma_pm_ops = { + .suspend_noirq = sa11x0_dma_suspend, + .resume_noirq = sa11x0_dma_resume, + .freeze_noirq = sa11x0_dma_suspend, + .thaw_noirq = sa11x0_dma_resume, + .poweroff_noirq = sa11x0_dma_suspend, + .restore_noirq = sa11x0_dma_resume, +}; + +static struct platform_driver sa11x0_dma_driver = { + .driver = { + .name = "sa11x0-dma", + .owner = THIS_MODULE, + .pm = &sa11x0_dma_pm_ops, + }, + .probe = sa11x0_dma_probe, + .remove = __devexit_p(sa11x0_dma_remove), +}; + +bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param) +{ + if (chan->device->dev->driver == &sa11x0_dma_driver.driver) { + struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); + const char *p = param; + + return !strcmp(c->name, p); + } + return false; +} +EXPORT_SYMBOL(sa11x0_dma_filter_fn); + +static int __init sa11x0_dma_init(void) +{ + return platform_driver_register(&sa11x0_dma_driver); +} +subsys_initcall(sa11x0_dma_init); + +static void __exit sa11x0_dma_exit(void) +{ + platform_driver_unregister(&sa11x0_dma_driver); +} +module_exit(sa11x0_dma_exit); + +MODULE_AUTHOR("Russell King"); +MODULE_DESCRIPTION("SA-11x0 DMA driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:sa11x0-dma"); diff --git a/include/linux/sa11x0-dma.h b/include/linux/sa11x0-dma.h new file mode 100644 index 0000000..65839a5 --- /dev/null +++ b/include/linux/sa11x0-dma.h @@ -0,0 +1,24 @@ +/* + * SA11x0 DMA Engine support + * + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __LINUX_SA11X0_DMA_H +#define __LINUX_SA11X0_DMA_H + +struct dma_chan; + +#if defined(CONFIG_DMA_SA11X0) || defined(CONFIG_DMA_SA11X0_MODULE) +bool sa11x0_dma_filter_fn(struct dma_chan *, void *); +#else +static inline bool sa11x0_dma_filter_fn(struct dma_chan *c, void *d) +{ + return false; +} +#endif + +#endif -- cgit v0.10.2 From 7931d92f4f38100061cf6b8a9737967057f94871 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 9 Jan 2012 21:45:51 +0000 Subject: ARM: sa11x0: add SA-11x0 DMA device Add sa11x0 DMA platform device and resources to the list of generic platform devices for SA11x0 machines. 
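With the engine registered, a peripheral driver claims one of the sixteen named channels through the generic dmaengine allocator, passing the sa11x0_dma_filter_fn() helper from the new header to match on the DDAR-derived channel name. A minimal sketch of that request path (the wrapper function here is illustrative, not part of the series; the IrDA conversion in the following patch requests "Ser2ICPRc" and "Ser2ICPTr" in exactly this way):

#include <linux/dmaengine.h>
#include <linux/sa11x0-dma.h>

static struct dma_chan *sa11x0_claim_channel(const char *name)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* e.g. name = "Ser2ICPRc" for the Ser. 2 ICP receive channel */
	return dma_request_channel(mask, sa11x0_dma_filter_fn, (void *)name);
}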
Signed-off-by: Russell King diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c index bb10ee2..52ea622 100644 --- a/arch/arm/mach-sa1100/generic.c +++ b/arch/arm/mach-sa1100/generic.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -350,6 +351,29 @@ static struct platform_device sa11x0rtc_device = { .id = -1, }; +static struct resource sa11x0dma_resources[] = { + DEFINE_RES_MEM(__PREG(DDAR(0)), 6 * DMASp), + DEFINE_RES_IRQ(IRQ_DMA0), + DEFINE_RES_IRQ(IRQ_DMA1), + DEFINE_RES_IRQ(IRQ_DMA2), + DEFINE_RES_IRQ(IRQ_DMA3), + DEFINE_RES_IRQ(IRQ_DMA4), + DEFINE_RES_IRQ(IRQ_DMA5), +}; + +static u64 sa11x0dma_dma_mask = DMA_BIT_MASK(32); + +static struct platform_device sa11x0dma_device = { + .name = "sa11x0-dma", + .id = -1, + .dev = { + .dma_mask = &sa11x0dma_dma_mask, + .coherent_dma_mask = 0xffffffff, + }, + .num_resources = ARRAY_SIZE(sa11x0dma_resources), + .resource = sa11x0dma_resources, +}; + static struct platform_device *sa11x0_devices[] __initdata = { &sa11x0udc_device, &sa11x0uart1_device, @@ -358,6 +382,7 @@ static struct platform_device *sa11x0_devices[] __initdata = { &sa11x0pcmcia_device, &sa11x0fb_device, &sa11x0rtc_device, + &sa11x0dma_device, }; static int __init sa1100_init(void) -- cgit v0.10.2 From bf95154ff6c84e04afd9ba7f2b54a4628beefdb9 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 13 Jan 2012 11:48:13 +0000 Subject: NET: sa11x0-ir: convert sa11x0-ir driver to use DMA engine API Convert the sa11x0 IrDA driver to use the sa11x0 DMA engine driver rather than our own platform specific DMA API. Signed-off-by: Russell King diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig index e535137..4680478 100644 --- a/drivers/net/irda/Kconfig +++ b/drivers/net/irda/Kconfig @@ -356,7 +356,7 @@ config VLSI_FIR config SA1100_FIR tristate "SA1100 Internal IR" - depends on ARCH_SA1100 && IRDA + depends on ARCH_SA1100 && IRDA && DMA_SA11X0 config VIA_FIR tristate "VIA VT8231/VT1211 SIR/MIR/FIR" diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 9c748f3..620a48d 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -30,12 +30,13 @@ #include #include #include +#include +#include #include #include #include -#include #include #include @@ -47,7 +48,8 @@ struct sa1100_buf { struct device *dev; struct sk_buff *skb; struct scatterlist sg; - dma_regs_t *regs; + struct dma_chan *chan; + dma_cookie_t cookie; }; struct sa1100_irda { @@ -79,6 +81,75 @@ static int sa1100_irda_set_speed(struct sa1100_irda *, int); #define HPSIR_MAX_RXLEN 2047 +static struct dma_slave_config sa1100_irda_fir_rx = { + .direction = DMA_FROM_DEVICE, + .src_addr = __PREG(Ser2HSDR), + .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, + .src_maxburst = 8, +}; + +static struct dma_slave_config sa1100_irda_fir_tx = { + .direction = DMA_TO_DEVICE, + .dst_addr = __PREG(Ser2HSDR), + .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, + .dst_maxburst = 8, +}; + +static unsigned sa1100_irda_dma_xferred(struct sa1100_buf *buf) +{ + struct dma_chan *chan = buf->chan; + struct dma_tx_state state; + enum dma_status status; + + status = chan->device->device_tx_status(chan, buf->cookie, &state); + if (status != DMA_PAUSED) + return 0; + + return sg_dma_len(&buf->sg) - state.residue; +} + +static int sa1100_irda_dma_request(struct device *dev, struct sa1100_buf *buf, + const char *name, struct dma_slave_config *cfg) +{ + dma_cap_mask_t m; + int ret; + + dma_cap_zero(m); + dma_cap_set(DMA_SLAVE, m); + + buf->chan = 
dma_request_channel(m, sa11x0_dma_filter_fn, (void *)name); + if (!buf->chan) { + dev_err(dev, "unable to request DMA channel for %s\n", + name); + return -ENOENT; + } + + ret = dmaengine_slave_config(buf->chan, cfg); + if (ret) + dev_warn(dev, "DMA slave_config for %s returned %d\n", + name, ret); + + buf->dev = buf->chan->device->dev; + + return 0; +} + +static void sa1100_irda_dma_start(struct sa1100_buf *buf, + enum dma_transfer_direction dir, dma_async_tx_callback cb, void *cb_p) +{ + struct dma_async_tx_descriptor *desc; + struct dma_chan *chan = buf->chan; + + desc = chan->device->device_prep_slave_sg(chan, &buf->sg, 1, dir, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (desc) { + desc->callback = cb; + desc->callback_param = cb_p; + buf->cookie = dmaengine_submit(desc); + dma_async_issue_pending(chan); + } +} + /* * Allocate and map the receive buffer, unless it is already allocated. */ @@ -127,9 +198,9 @@ static void sa1100_irda_rx_dma_start(struct sa1100_irda *si) /* * Enable the DMA, receiver and receive interrupt. */ - sa1100_clear_dma(si->dma_rx.regs); - sa1100_start_dma(si->dma_rx.regs, sg_dma_address(&si->dma_rx.sg), - sg_dma_len(&si->dma_rx.sg)); + dmaengine_terminate_all(si->dma_rx.chan); + sa1100_irda_dma_start(&si->dma_rx, DMA_DEV_TO_MEM, NULL, NULL); + Ser2HSCR0 = HSCR0_HSSP | HSCR0_RXE; } @@ -326,8 +397,7 @@ static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev, return NETDEV_TX_OK; } - sa1100_start_dma(si->dma_tx.regs, sg_dma_address(&si->dma_tx.sg), - sg_dma_len(&si->dma_tx.sg)); + sa1100_irda_dma_start(&si->dma_tx, DMA_MEM_TO_DEV, sa1100_irda_firtxdma_irq, dev); /* * If we have a mean turn-around time, impose the specified @@ -345,7 +415,6 @@ static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev, static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev) { struct sk_buff *skb = si->dma_rx.skb; - dma_addr_t dma_addr; unsigned int len, stat, data; if (!skb) { @@ -356,8 +425,7 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev /* * Get the current data position. */ - dma_addr = sa1100_get_dma_pos(si->dma_rx.regs); - len = dma_addr - sg_dma_address(&si->dma_rx.sg); + len = sa1100_irda_dma_xferred(&si->dma_rx); if (len > HPSIR_MAX_RXLEN) len = HPSIR_MAX_RXLEN; dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE); @@ -421,7 +489,7 @@ static irqreturn_t sa1100_irda_fir_irq(struct net_device *dev, struct sa1100_ird /* * Stop RX DMA */ - sa1100_stop_dma(si->dma_rx.regs); + dmaengine_pause(si->dma_rx.chan); /* * Framing error - we throw away the packet completely. @@ -476,11 +544,9 @@ static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed) case 57600: case 115200: brd = 3686400 / (16 * speed) - 1; - /* - * Stop the receive DMA. - */ + /* Stop the receive DMA, and configure transmit. */ if (IS_FIR(si)) - sa1100_stop_dma(si->dma_rx.regs); + dmaengine_terminate_all(si->dma_rx.chan); local_irq_save(flags); @@ -698,8 +764,8 @@ static void sa1100_irda_shutdown(struct sa1100_irda *si) /* * Stop all DMA activity. */ - sa1100_stop_dma(si->dma_rx.regs); - sa1100_stop_dma(si->dma_tx.regs); + dmaengine_terminate_all(si->dma_rx.chan); + dmaengine_terminate_all(si->dma_tx.chan); /* Disable the port. 
*/ Ser2UTCR3 = 0; @@ -716,20 +782,16 @@ static int sa1100_irda_start(struct net_device *dev) si->speed = 9600; - err = sa1100_request_dma(DMA_Ser2HSSPRd, "IrDA receive", - NULL, NULL, &si->dma_rx.regs); + err = sa1100_irda_dma_request(si->dev, &si->dma_rx, "Ser2ICPRc", + &sa1100_irda_fir_rx); if (err) goto err_rx_dma; - err = sa1100_request_dma(DMA_Ser2HSSPWr, "IrDA transmit", - sa1100_irda_firtxdma_irq, dev, - &si->dma_tx.regs); + err = sa1100_irda_dma_request(si->dev, &si->dma_tx, "Ser2ICPTr", + &sa1100_irda_sir_tx); if (err) goto err_tx_dma; - si->dma_rx.dev = si->dev; - si->dma_tx.dev = si->dev; - /* * Setup the serial port for the specified speed. */ @@ -764,9 +826,9 @@ err_irlap: si->open = 0; sa1100_irda_shutdown(si); err_startup: - sa1100_free_dma(si->dma_tx.regs); + dma_release_channel(si->dma_tx.chan); err_tx_dma: - sa1100_free_dma(si->dma_rx.regs); + dma_release_channel(si->dma_rx.chan); err_rx_dma: return err; } @@ -810,8 +872,8 @@ static int sa1100_irda_stop(struct net_device *dev) /* * Free resources */ - sa1100_free_dma(si->dma_tx.regs); - sa1100_free_dma(si->dma_rx.regs); + dma_release_channel(si->dma_tx.chan); + dma_release_channel(si->dma_rx.chan); free_irq(dev->irq, dev); sa1100_set_power(si, 0); -- cgit v0.10.2 From d138dacb4b8255c02e4380ce2aadab758a99d2c1 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 13 Jan 2012 11:50:03 +0000 Subject: NET: sa11x0-ir: add DMA support for SIR transmit mode As the DMA engine API allows DMA channels to be reconfigured on the fly, we can now support switching the DMA channel configuration to support SIR transmit DMA without needing to claim an additional physical DMA channel, which would otherwise have tied up half the DMA channels for just one driver. Signed-off-by: Russell King diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 620a48d..a0d1913 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -81,6 +81,13 @@ static int sa1100_irda_set_speed(struct sa1100_irda *, int); #define HPSIR_MAX_RXLEN 2047 +static struct dma_slave_config sa1100_irda_sir_tx = { + .direction = DMA_TO_DEVICE, + .dst_addr = __PREG(Ser2UTDR), + .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, + .dst_maxburst = 4, +}; + static struct dma_slave_config sa1100_irda_fir_rx = { .direction = DMA_FROM_DEVICE, .src_addr = __PREG(Ser2HSDR), @@ -215,6 +222,36 @@ static void sa1100_irda_check_speed(struct sa1100_irda *si) /* * HP-SIR format support. */ +static void sa1100_irda_sirtxdma_irq(void *id) +{ + struct net_device *dev = id; + struct sa1100_irda *si = netdev_priv(dev); + + dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE); + dev_kfree_skb(si->dma_tx.skb); + si->dma_tx.skb = NULL; + + dev->stats.tx_packets++; + dev->stats.tx_bytes += sg_dma_len(&si->dma_tx.sg); + + /* We need to ensure that the transmitter has finished. */ + do + rmb(); + while (Ser2UTSR1 & UTSR1_TBY); + + /* + * Ok, we've finished transmitting. Now enable the receiver. + * Sometimes we get a receive IRQ immediately after a transmit... + */ + Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; + Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE; + + sa1100_irda_check_speed(si); + + /* I'm hungry!
*/ + netif_wake_queue(dev); +} + static int sa1100_irda_sir_tx_start(struct sk_buff *skb, struct net_device *dev, struct sa1100_irda *si) { @@ -222,14 +259,22 @@ static int sa1100_irda_sir_tx_start(struct sk_buff *skb, struct net_device *dev, si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize); + si->dma_tx.skb = skb; + sg_set_buf(&si->dma_tx.sg, si->tx_buff.data, si->tx_buff.len); + if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) { + si->dma_tx.skb = NULL; + netif_wake_queue(dev); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + sa1100_irda_dma_start(&si->dma_tx, DMA_MEM_TO_DEV, sa1100_irda_sirtxdma_irq, dev); + /* - * Set the transmit interrupt enable. This will fire off an - * interrupt immediately. Note that we disable the receiver - * so we won't get spurious characters received. + * The mean turn-around time is enforced by XBOF padding, + * so we don't have to do anything special here. */ - Ser2UTCR3 = UTCR3_TIE | UTCR3_TXE; - - dev_kfree_skb(skb); + Ser2UTCR3 = UTCR3_TXE; return NETDEV_TX_OK; } @@ -288,43 +333,6 @@ static irqreturn_t sa1100_irda_sir_irq(struct net_device *dev, struct sa1100_ird } - if (status & UTSR0_TFS && si->tx_buff.len) { - /* - * Transmitter FIFO is not full - */ - do { - Ser2UTDR = *si->tx_buff.data++; - si->tx_buff.len -= 1; - } while (Ser2UTSR1 & UTSR1_TNF && si->tx_buff.len); - - if (si->tx_buff.len == 0) { - dev->stats.tx_packets++; - dev->stats.tx_bytes += si->tx_buff.data - - si->tx_buff.head; - - /* - * We need to ensure that the transmitter has - * finished. - */ - do - rmb(); - while (Ser2UTSR1 & UTSR1_TBY); - - /* - * Ok, we've finished transmitting. Now enable - * the receiver. Sometimes we get a receive IRQ - * immediately after a transmit... - */ - Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; - Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE; - - sa1100_irda_check_speed(si); - - /* I'm hungry! */ - netif_wake_queue(dev); - } - } - return IRQ_HANDLED; } @@ -545,8 +553,11 @@ static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed) brd = 3686400 / (16 * speed) - 1; /* Stop the receive DMA, and configure transmit. */ - if (IS_FIR(si)) + if (IS_FIR(si)) { dmaengine_terminate_all(si->dma_rx.chan); + dmaengine_slave_config(si->dma_tx.chan, + &sa1100_irda_sir_tx); + } local_irq_save(flags); @@ -574,6 +585,10 @@ static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed) break; case 4000000: + if (!IS_FIR(si)) + dmaengine_slave_config(si->dma_tx.chan, + &sa1100_irda_fir_tx); + local_irq_save(flags); Ser2HSSR0 = 0xff; -- cgit v0.10.2
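What makes the last patch work is that the DMA engine API lets a slave channel be re-pointed at a different FIFO between transfers. A condensed sketch of the pattern (the helper below is illustrative and not a function in the driver; sa1100_irda_sir_tx and sa1100_irda_fir_tx are the two dma_slave_config structures added above):

static int sa1100_irda_retarget_tx(struct sa1100_irda *si, bool fir)
{
	/*
	 * The tx channel must be idle at this point; the driver only
	 * switches configuration from the speed-change paths, after any
	 * pending transmit has completed or been terminated.
	 */
	return dmaengine_slave_config(si->dma_tx.chan,
				      fir ? &sa1100_irda_fir_tx
					  : &sa1100_irda_sir_tx);
}

In the driver itself the two dmaengine_slave_config() calls sit directly in sa1100_irda_set_speed(), as the hunks above show, so the single "Ser2ICPTr" channel serves both the SIR UART data register and the FIR HSSP data register.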