path: root/drivers
author		Grant Likely <grant.likely@secretlab.ca>	2011-06-09 18:42:57 (GMT)
committer	Grant Likely <grant.likely@secretlab.ca>	2011-06-09 18:42:57 (GMT)
commit		e4c8308c852e6b3fa49215052a5b9e99597dee99 (patch)
tree		a44ef3377c17d69c2210e809ac2552540ce6f1fc /drivers
parent		c37f3c2749b53225d36faa5c583203c5f12ae15b (diff)
parent		626a96db11698119a67eeda130488e869aa6f14e (diff)
download	linux-fsl-qoriq-e4c8308c852e6b3fa49215052a5b9e99597dee99.tar.xz
Merge branch 'ep93xx-dma' into spi/next
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/nbd.c | 22
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 10
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 3
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 17
-rw-r--r--  drivers/clocksource/sh_cmt.c | 12
-rw-r--r--  drivers/clocksource/sh_tmu.c | 12
-rw-r--r--  drivers/dma/Kconfig | 7
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/ep93xx_dma.c | 1355
-rw-r--r--  drivers/dma/shdma.c | 9
-rw-r--r--  drivers/hwmon/coretemp.c | 23
-rw-r--r--  drivers/hwmon/max6642.c | 22
-rw-r--r--  drivers/input/serio/serport.c | 10
-rw-r--r--  drivers/isdn/gigaset/ser-gigaset.c | 8
-rw-r--r--  drivers/misc/ti-st/st_core.c | 6
-rw-r--r--  drivers/net/3c509.c | 14
-rw-r--r--  drivers/net/3c59x.c | 4
-rw-r--r--  drivers/net/caif/caif_serial.c | 6
-rw-r--r--  drivers/net/can/flexcan.c | 5
-rw-r--r--  drivers/net/can/slcan.c | 9
-rw-r--r--  drivers/net/davinci_emac.c | 10
-rw-r--r--  drivers/net/depca.c | 35
-rw-r--r--  drivers/net/dm9000.c | 6
-rw-r--r--  drivers/net/hamradio/6pack.c | 8
-rw-r--r--  drivers/net/hamradio/mkiss.c | 11
-rw-r--r--  drivers/net/hp100.c | 12
-rw-r--r--  drivers/net/ibmlana.c | 4
-rw-r--r--  drivers/net/irda/irtty-sir.c | 16
-rw-r--r--  drivers/net/irda/smsc-ircc2.c | 44
-rw-r--r--  drivers/net/ks8842.c | 2
-rw-r--r--  drivers/net/ne3210.c | 15
-rw-r--r--  drivers/net/ppp_async.c | 6
-rw-r--r--  drivers/net/ppp_synctty.c | 6
-rw-r--r--  drivers/net/slip.c | 11
-rw-r--r--  drivers/net/smc-mca.c | 6
-rw-r--r--  drivers/net/tg3.c | 2
-rw-r--r--  drivers/net/tokenring/madgemc.c | 2
-rw-r--r--  drivers/net/tulip/de4x5.c | 4
-rw-r--r--  drivers/net/usb/catc.c | 2
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 3
-rw-r--r--  drivers/net/wan/x25_asy.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath9k/Kconfig | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_calib.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.c | 22
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom_9287.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/rc.c | 3
-rw-r--r--  drivers/net/wireless/b43/phy_n.c | 2
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-lib.c | 4
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000.c | 28
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.h | 1
-rw-r--r--  drivers/net/wireless/libertas/cmd.c | 6
-rw-r--r--  drivers/net/wireless/mwifiex/sdio.h | 4
-rw-r--r--  drivers/net/wireless/rt2x00/Kconfig | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 35
-rw-r--r--  drivers/net/wireless/wl12xx/conf.h | 3
-rw-r--r--  drivers/net/wireless/wl12xx/main.c | 1
-rw-r--r--  drivers/net/wireless/wl12xx/scan.c | 49
-rw-r--r--  drivers/net/wireless/wl12xx/scan.h | 3
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 53
-rw-r--r--  drivers/rtc/rtc-m41t93.c | 2
-rw-r--r--  drivers/scsi/scsi_scan.c | 2
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 1
-rw-r--r--  drivers/spi/spi-ep93xx.c | 303
-rw-r--r--  drivers/tty/n_gsm.c | 6
-rw-r--r--  drivers/tty/n_hdlc.c | 18
-rw-r--r--  drivers/tty/n_r3964.c | 10
-rw-r--r--  drivers/tty/n_tty.c | 61
-rw-r--r--  drivers/tty/tty_buffer.c | 15
-rw-r--r--  drivers/tty/vt/selection.c | 3
75 files changed, 2097 insertions, 348 deletions
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index e6fc716..f533f33 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -192,7 +192,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
if (lo->xmit_timeout)
del_timer_sync(&ti);
} else
- result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0);
+ result = kernel_recvmsg(sock, &msg, &iov, 1, size,
+ msg.msg_flags);
if (signal_pending(current)) {
siginfo_t info;
@@ -753,9 +754,26 @@ static int __init nbd_init(void)
return -ENOMEM;
part_shift = 0;
- if (max_part > 0)
+ if (max_part > 0) {
part_shift = fls(max_part);
+ /*
+ * Adjust max_part according to part_shift as it is exported
+ * to user space so that users can know the max number of
+ * partitions the kernel should be able to manage.
+ *
+ * Note that -1 is required because partition 0 is reserved
+ * for the whole disk.
+ */
+ max_part = (1UL << part_shift) - 1;
+ }
+
+ if ((1UL << part_shift) > DISK_MAX_PARTS)
+ return -EINVAL;
+
+ if (nbds_max > 1UL << (MINORBITS - part_shift))
+ return -EINVAL;
+
for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = alloc_disk(1 << part_shift);
if (!disk)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index c73910c..5cf2993 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -809,11 +809,13 @@ static int __init xen_blkif_init(void)
failed_init:
kfree(blkbk->pending_reqs);
kfree(blkbk->pending_grant_handles);
- for (i = 0; i < mmap_pages; i++) {
- if (blkbk->pending_pages[i])
- __free_page(blkbk->pending_pages[i]);
+ if (blkbk->pending_pages) {
+ for (i = 0; i < mmap_pages; i++) {
+ if (blkbk->pending_pages[i])
+ __free_page(blkbk->pending_pages[i]);
+ }
+ kfree(blkbk->pending_pages);
}
- kfree(blkbk->pending_pages);
kfree(blkbk);
blkbk = NULL;
return rc;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3457082..6cc0db1 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -357,14 +357,13 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
}
vbd->bdev = bdev;
- vbd->size = vbd_sz(vbd);
-
if (vbd->bdev->bd_disk == NULL) {
DPRINTK("xen_vbd_create: device %08x doesn't exist.\n",
vbd->pdevice);
xen_vbd_free(vbd);
return -ENOENT;
}
+ vbd->size = vbd_sz(vbd);
if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
vbd->type |= VDISK_CDROM;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index b3f0199..48ad2a7 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -355,29 +355,24 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
* flags pointer to flags for data
* count count of received data in bytes
*
- * Return Value: Number of bytes received
+ * Return Value: None
*/
-static unsigned int hci_uart_tty_receive(struct tty_struct *tty,
- const u8 *data, char *flags, int count)
+static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count)
{
struct hci_uart *hu = (void *)tty->disc_data;
- int received;
if (!hu || tty != hu->tty)
- return -ENODEV;
+ return;
if (!test_bit(HCI_UART_PROTO_SET, &hu->flags))
- return -EINVAL;
+ return;
spin_lock(&hu->rx_lock);
- received = hu->proto->recv(hu, (void *) data, count);
- if (received > 0)
- hu->hdev->stat.byte_rx += received;
+ hu->proto->recv(hu, (void *) data, count);
+ hu->hdev->stat.byte_rx += count;
spin_unlock(&hu->rx_lock);
tty_unthrottle(tty);
-
- return received;
}
static int hci_uart_register_dev(struct hci_uart *hu)
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 036e586..dc7c033 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -24,7 +24,6 @@
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/clk.h>
-#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
@@ -153,12 +152,10 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
int ret;
- /* wake up device and enable clock */
- pm_runtime_get_sync(&p->pdev->dev);
+ /* enable clock */
ret = clk_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
- pm_runtime_put_sync(&p->pdev->dev);
return ret;
}
@@ -190,9 +187,8 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
/* disable interrupts in CMT block */
sh_cmt_write(p, CMCSR, 0);
- /* stop clock and mark device as idle */
+ /* stop clock */
clk_disable(p->clk);
- pm_runtime_put_sync(&p->pdev->dev);
}
/* private flags */
@@ -664,7 +660,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
if (p) {
dev_info(&pdev->dev, "kept as earlytimer\n");
- pm_runtime_enable(&pdev->dev);
return 0;
}
@@ -679,9 +674,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
kfree(p);
platform_set_drvdata(pdev, NULL);
}
-
- if (!is_early_platform_device(pdev))
- pm_runtime_enable(&pdev->dev);
return ret;
}
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 1729628..8081357 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -25,7 +25,6 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
-#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
@@ -110,12 +109,10 @@ static int sh_tmu_enable(struct sh_tmu_priv *p)
{
int ret;
- /* wake up device and enable clock */
- pm_runtime_get_sync(&p->pdev->dev);
+ /* enable clock */
ret = clk_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
- pm_runtime_put_sync(&p->pdev->dev);
return ret;
}
@@ -144,9 +141,8 @@ static void sh_tmu_disable(struct sh_tmu_priv *p)
/* disable interrupts in TMU block */
sh_tmu_write(p, TCR, 0x0000);
- /* stop clock and mark device as idle */
+ /* stop clock */
clk_disable(p->clk);
- pm_runtime_put_sync(&p->pdev->dev);
}
static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
@@ -415,7 +411,6 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
if (p) {
dev_info(&pdev->dev, "kept as earlytimer\n");
- pm_runtime_enable(&pdev->dev);
return 0;
}
@@ -430,9 +425,6 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
kfree(p);
platform_set_drvdata(pdev, NULL);
}
-
- if (!is_early_platform_device(pdev))
- pm_runtime_enable(&pdev->dev);
return ret;
}
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 25cf327..2e3b3d3 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -237,6 +237,13 @@ config MXS_DMA
Support the MXS DMA engine. This engine including APBH-DMA
and APBX-DMA is integrated into Freescale i.MX23/28 chips.
+config EP93XX_DMA
+ bool "Cirrus Logic EP93xx DMA support"
+ depends on ARCH_EP93XX
+ select DMA_ENGINE
+ help
+ Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 836095a..30cf3b1 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
+obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
new file mode 100644
index 0000000..0766c1e
--- /dev/null
+++ b/drivers/dma/ep93xx_dma.c
@@ -0,0 +1,1355 @@
+/*
+ * Driver for the Cirrus Logic EP93xx DMA Controller
+ *
+ * Copyright (C) 2011 Mika Westerberg
+ *
+ * DMA M2P implementation is based on the original
+ * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights:
+ *
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Copyright (C) 2006 Applied Data Systems
+ * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
+ *
+ * This driver is based on dw_dmac and amba-pl08x drivers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <mach/dma.h>
+
+/* M2P registers */
+#define M2P_CONTROL 0x0000
+#define M2P_CONTROL_STALLINT BIT(0)
+#define M2P_CONTROL_NFBINT BIT(1)
+#define M2P_CONTROL_CH_ERROR_INT BIT(3)
+#define M2P_CONTROL_ENABLE BIT(4)
+#define M2P_CONTROL_ICE BIT(6)
+
+#define M2P_INTERRUPT 0x0004
+#define M2P_INTERRUPT_STALL BIT(0)
+#define M2P_INTERRUPT_NFB BIT(1)
+#define M2P_INTERRUPT_ERROR BIT(3)
+
+#define M2P_PPALLOC 0x0008
+#define M2P_STATUS 0x000c
+
+#define M2P_MAXCNT0 0x0020
+#define M2P_BASE0 0x0024
+#define M2P_MAXCNT1 0x0030
+#define M2P_BASE1 0x0034
+
+#define M2P_STATE_IDLE 0
+#define M2P_STATE_STALL 1
+#define M2P_STATE_ON 2
+#define M2P_STATE_NEXT 3
+
+/* M2M registers */
+#define M2M_CONTROL 0x0000
+#define M2M_CONTROL_DONEINT BIT(2)
+#define M2M_CONTROL_ENABLE BIT(3)
+#define M2M_CONTROL_START BIT(4)
+#define M2M_CONTROL_DAH BIT(11)
+#define M2M_CONTROL_SAH BIT(12)
+#define M2M_CONTROL_PW_SHIFT 9
+#define M2M_CONTROL_PW_8 (0 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_PW_16 (1 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_PW_32 (2 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_PW_MASK (3 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_TM_SHIFT 13
+#define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT)
+#define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT)
+#define M2M_CONTROL_RSS_SHIFT 22
+#define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT)
+#define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT)
+#define M2M_CONTROL_RSS_IDE (3 << M2M_CONTROL_RSS_SHIFT)
+#define M2M_CONTROL_NO_HDSK BIT(24)
+#define M2M_CONTROL_PWSC_SHIFT 25
+
+#define M2M_INTERRUPT 0x0004
+#define M2M_INTERRUPT_DONEINT BIT(1)
+
+#define M2M_BCR0 0x0010
+#define M2M_BCR1 0x0014
+#define M2M_SAR_BASE0 0x0018
+#define M2M_SAR_BASE1 0x001c
+#define M2M_DAR_BASE0 0x002c
+#define M2M_DAR_BASE1 0x0030
+
+#define DMA_MAX_CHAN_BYTES 0xffff
+#define DMA_MAX_CHAN_DESCRIPTORS 32
+
+struct ep93xx_dma_engine;
+
+/**
+ * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
+ * @src_addr: source address of the transaction
+ * @dst_addr: destination address of the transaction
+ * @size: size of the transaction (in bytes)
+ * @complete: this descriptor is completed
+ * @txd: dmaengine API descriptor
+ * @tx_list: list of linked descriptors
+ * @node: link used for putting this into a channel queue
+ */
+struct ep93xx_dma_desc {
+ u32 src_addr;
+ u32 dst_addr;
+ size_t size;
+ bool complete;
+ struct dma_async_tx_descriptor txd;
+ struct list_head tx_list;
+ struct list_head node;
+};
+
+/**
+ * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
+ * @chan: dmaengine API channel
+ * @edma: pointer to the engine device
+ * @regs: memory mapped registers
+ * @irq: interrupt number of the channel
+ * @clk: clock used by this channel
+ * @tasklet: channel specific tasklet used for callbacks
+ * @lock: lock protecting the fields following
+ * @flags: flags for the channel
+ * @buffer: which buffer to use next (0/1)
+ * @last_completed: last completed cookie value
+ * @active: flattened chain of descriptors currently being processed
+ * @queue: pending descriptors which are handled next
+ * @free_list: list of free descriptors which can be used
+ * @runtime_addr: physical address currently used as dest/src (M2M only). This
+ * is set via %DMA_SLAVE_CONFIG before slave operation is
+ * prepared
+ * @runtime_ctrl: M2M runtime values for the control register.
+ *
+ * As the EP93xx DMA controller doesn't support real chained DMA descriptors we
+ * use a slightly different scheme here: @active points to the head of a
+ * flattened DMA descriptor chain.
+ *
+ * @queue holds pending transactions. These are linked through the first
+ * descriptor in the chain. When a descriptor is moved to the @active queue,
+ * the first and chained descriptors are flattened into a single list.
+ *
+ * @chan.private holds pointer to &struct ep93xx_dma_data which contains
+ * necessary channel configuration information. For memcpy channels this must
+ * be %NULL.
+ */
+struct ep93xx_dma_chan {
+ struct dma_chan chan;
+ const struct ep93xx_dma_engine *edma;
+ void __iomem *regs;
+ int irq;
+ struct clk *clk;
+ struct tasklet_struct tasklet;
+ /* protects the fields following */
+ spinlock_t lock;
+ unsigned long flags;
+/* Channel is configured for cyclic transfers */
+#define EP93XX_DMA_IS_CYCLIC 0
+
+ int buffer;
+ dma_cookie_t last_completed;
+ struct list_head active;
+ struct list_head queue;
+ struct list_head free_list;
+ u32 runtime_addr;
+ u32 runtime_ctrl;
+};
+
+/**
+ * struct ep93xx_dma_engine - the EP93xx DMA engine instance
+ * @dma_dev: holds the dmaengine device
+ * @m2m: is this an M2M or M2P device
+ * @hw_setup: method which sets the channel up for operation
+ * @hw_shutdown: shuts the channel down and flushes whatever is left
+ * @hw_submit: pushes active descriptor(s) to the hardware
+ * @hw_interrupt: handle the interrupt
+ * @num_channels: number of channels for this instance
+ * @channels: array of channels
+ *
+ * There is one instance of this struct for the M2P channels and one for the
+ * M2M channels. hw_xxx() methods are used to perform operations which are
+ * different on M2M and M2P channels. These methods are called with channel
+ * lock held and interrupts disabled so they cannot sleep.
+ */
+struct ep93xx_dma_engine {
+ struct dma_device dma_dev;
+ bool m2m;
+ int (*hw_setup)(struct ep93xx_dma_chan *);
+ void (*hw_shutdown)(struct ep93xx_dma_chan *);
+ void (*hw_submit)(struct ep93xx_dma_chan *);
+ int (*hw_interrupt)(struct ep93xx_dma_chan *);
+#define INTERRUPT_UNKNOWN 0
+#define INTERRUPT_DONE 1
+#define INTERRUPT_NEXT_BUFFER 2
+
+ size_t num_channels;
+ struct ep93xx_dma_chan channels[];
+};
+
+static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
+{
+ return &edmac->chan.dev->device;
+}
+
+static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct ep93xx_dma_chan, chan);
+}
+
+/**
+ * ep93xx_dma_set_active - set new active descriptor chain
+ * @edmac: channel
+ * @desc: head of the new active descriptor chain
+ *
+ * Sets @desc to be the head of the new active descriptor chain. This is the
+ * chain which is processed next. The active list must be empty before calling
+ * this function.
+ *
+ * Called with @edmac->lock held and interrupts disabled.
+ */
+static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
+ struct ep93xx_dma_desc *desc)
+{
+ BUG_ON(!list_empty(&edmac->active));
+
+ list_add_tail(&desc->node, &edmac->active);
+
+ /* Flatten the @desc->tx_list chain into @edmac->active list */
+ while (!list_empty(&desc->tx_list)) {
+ struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
+ struct ep93xx_dma_desc, node);
+
+ /*
+ * We copy the callback parameters from the first descriptor
+ * to all the chained descriptors. This way we can call the
+ * callback without having to find out the first descriptor in
+ * the chain. Useful for cyclic transfers.
+ */
+ d->txd.callback = desc->txd.callback;
+ d->txd.callback_param = desc->txd.callback_param;
+
+ list_move_tail(&d->node, &edmac->active);
+ }
+}
+
+/* Called with @edmac->lock held and interrupts disabled */
+static struct ep93xx_dma_desc *
+ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
+{
+ return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
+}
+
+/**
+ * ep93xx_dma_advance_active - advances to the next active descriptor
+ * @edmac: channel
+ *
+ * Function advances active descriptor to the next in the @edmac->active and
+ * returns %true if we still have descriptors in the chain to process.
+ * Otherwise returns %false.
+ *
+ * When the channel is in cyclic mode always returns %true.
+ *
+ * Called with @edmac->lock held and interrupts disabled.
+ */
+static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
+{
+ list_rotate_left(&edmac->active);
+
+ if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
+ return true;
+
+ /*
+ * If txd.cookie is set it means that we are back in the first
+ * descriptor in the chain and hence done with it.
+ */
+ return !ep93xx_dma_get_active(edmac)->txd.cookie;
+}
+
+/*
+ * M2P DMA implementation
+ */
+
+static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
+{
+ writel(control, edmac->regs + M2P_CONTROL);
+ /*
+ * EP93xx User's Guide states that we must perform a dummy read after
+ * write to the control register.
+ */
+ readl(edmac->regs + M2P_CONTROL);
+}
+
+static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_data *data = edmac->chan.private;
+ u32 control;
+
+ writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);
+
+ control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
+ | M2P_CONTROL_ENABLE;
+ m2p_set_control(edmac, control);
+
+ return 0;
+}
+
+static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
+{
+ return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
+}
+
+static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
+ u32 control;
+
+ control = readl(edmac->regs + M2P_CONTROL);
+ control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+ m2p_set_control(edmac, control);
+
+ while (m2p_channel_state(edmac) >= M2P_STATE_ON)
+ cpu_relax();
+
+ m2p_set_control(edmac, 0);
+
+ while (m2p_channel_state(edmac) == M2P_STATE_STALL)
+ cpu_relax();
+}
+
+static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+ u32 bus_addr;
+
+ if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE)
+ bus_addr = desc->src_addr;
+ else
+ bus_addr = desc->dst_addr;
+
+ if (edmac->buffer == 0) {
+ writel(desc->size, edmac->regs + M2P_MAXCNT0);
+ writel(bus_addr, edmac->regs + M2P_BASE0);
+ } else {
+ writel(desc->size, edmac->regs + M2P_MAXCNT1);
+ writel(bus_addr, edmac->regs + M2P_BASE1);
+ }
+
+ edmac->buffer ^= 1;
+}
+
+static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
+{
+ u32 control = readl(edmac->regs + M2P_CONTROL);
+
+ m2p_fill_desc(edmac);
+ control |= M2P_CONTROL_STALLINT;
+
+ if (ep93xx_dma_advance_active(edmac)) {
+ m2p_fill_desc(edmac);
+ control |= M2P_CONTROL_NFBINT;
+ }
+
+ m2p_set_control(edmac, control);
+}
+
+static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
+{
+ u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
+ u32 control;
+
+ if (irq_status & M2P_INTERRUPT_ERROR) {
+ struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+
+ /* Clear the error interrupt */
+ writel(1, edmac->regs + M2P_INTERRUPT);
+
+ /*
+ * It seems that there is no easy way of reporting errors back
+ * to client so we just report the error here and continue as
+ * usual.
+ *
+ * Revisit this when there is a mechanism to report back the
+ * errors.
+ */
+ dev_err(chan2dev(edmac),
+ "DMA transfer failed! Details:\n"
+ "\tcookie : %d\n"
+ "\tsrc_addr : 0x%08x\n"
+ "\tdst_addr : 0x%08x\n"
+ "\tsize : %zu\n",
+ desc->txd.cookie, desc->src_addr, desc->dst_addr,
+ desc->size);
+ }
+
+ switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
+ case M2P_INTERRUPT_STALL:
+ /* Disable interrupts */
+ control = readl(edmac->regs + M2P_CONTROL);
+ control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+ m2p_set_control(edmac, control);
+
+ return INTERRUPT_DONE;
+
+ case M2P_INTERRUPT_NFB:
+ if (ep93xx_dma_advance_active(edmac))
+ m2p_fill_desc(edmac);
+
+ return INTERRUPT_NEXT_BUFFER;
+ }
+
+ return INTERRUPT_UNKNOWN;
+}
+
+/*
+ * M2M DMA implementation
+ *
+ * For the M2M transfers we don't use NFB at all. This is because it simply
+ * doesn't work well with memcpy transfers. When you submit both buffers it is
+ * extremely unlikely that you get an NFB interrupt, but it instead reports
+ * DONE interrupt and both buffers are already transferred which means that we
+ * weren't able to update the next buffer.
+ *
+ * So for now we "simulate" NFB by just submitting buffer after buffer
+ * without double buffering.
+ */
+
+static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
+{
+ const struct ep93xx_dma_data *data = edmac->chan.private;
+ u32 control = 0;
+
+ if (!data) {
+ /* This is memcpy channel, nothing to configure */
+ writel(control, edmac->regs + M2M_CONTROL);
+ return 0;
+ }
+
+ switch (data->port) {
+ case EP93XX_DMA_SSP:
+ /*
+ * This was found via experimenting - anything less than 5
+ * causes the channel to perform only a partial transfer which
+ * leads to problems since we don't get DONE interrupt then.
+ */
+ control = (5 << M2M_CONTROL_PWSC_SHIFT);
+ control |= M2M_CONTROL_NO_HDSK;
+
+ if (data->direction == DMA_TO_DEVICE) {
+ control |= M2M_CONTROL_DAH;
+ control |= M2M_CONTROL_TM_TX;
+ control |= M2M_CONTROL_RSS_SSPTX;
+ } else {
+ control |= M2M_CONTROL_SAH;
+ control |= M2M_CONTROL_TM_RX;
+ control |= M2M_CONTROL_RSS_SSPRX;
+ }
+ break;
+
+ case EP93XX_DMA_IDE:
+ /*
+ * This IDE part is totally untested. Values below are taken
+ * from the EP93xx User's Guide and might not be correct.
+ */
+ control |= M2M_CONTROL_NO_HDSK;
+ control |= M2M_CONTROL_RSS_IDE;
+ control |= M2M_CONTROL_PW_16;
+
+ if (data->direction == DMA_TO_DEVICE) {
+ /* Worst case from the UG */
+ control = (3 << M2M_CONTROL_PWSC_SHIFT);
+ control |= M2M_CONTROL_DAH;
+ control |= M2M_CONTROL_TM_TX;
+ } else {
+ control = (2 << M2M_CONTROL_PWSC_SHIFT);
+ control |= M2M_CONTROL_SAH;
+ control |= M2M_CONTROL_TM_RX;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ writel(control, edmac->regs + M2M_CONTROL);
+ return 0;
+}
+
+static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
+ /* Just disable the channel */
+ writel(0, edmac->regs + M2M_CONTROL);
+}
+
+static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+
+ if (edmac->buffer == 0) {
+ writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
+ writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
+ writel(desc->size, edmac->regs + M2M_BCR0);
+ } else {
+ writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
+ writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
+ writel(desc->size, edmac->regs + M2M_BCR1);
+ }
+
+ edmac->buffer ^= 1;
+}
+
+static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_data *data = edmac->chan.private;
+ u32 control = readl(edmac->regs + M2M_CONTROL);
+
+ /*
+ * Since we allow clients to configure PW (peripheral width) we always
+ * clear PW bits here and then set them according to what is given in
+ * the runtime configuration.
+ */
+ control &= ~M2M_CONTROL_PW_MASK;
+ control |= edmac->runtime_ctrl;
+
+ m2m_fill_desc(edmac);
+ control |= M2M_CONTROL_DONEINT;
+
+ /*
+ * Now we can finally enable the channel. For M2M channel this must be
+ * done _after_ the BCRx registers are programmed.
+ */
+ control |= M2M_CONTROL_ENABLE;
+ writel(control, edmac->regs + M2M_CONTROL);
+
+ if (!data) {
+ /*
+ * For memcpy channels the software trigger must be asserted
+ * in order to start the memcpy operation.
+ */
+ control |= M2M_CONTROL_START;
+ writel(control, edmac->regs + M2M_CONTROL);
+ }
+}
+
+static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
+{
+ u32 control;
+
+ if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
+ return INTERRUPT_UNKNOWN;
+
+ /* Clear the DONE bit */
+ writel(0, edmac->regs + M2M_INTERRUPT);
+
+ /* Disable interrupts and the channel */
+ control = readl(edmac->regs + M2M_CONTROL);
+ control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
+ writel(control, edmac->regs + M2M_CONTROL);
+
+ /*
+ * Since we only get DONE interrupt we have to find out ourselves
+ * whether there still is something to process. So we try to advance
+ * the chain and see whether it succeeds.
+ */
+ if (ep93xx_dma_advance_active(edmac)) {
+ edmac->edma->hw_submit(edmac);
+ return INTERRUPT_NEXT_BUFFER;
+ }
+
+ return INTERRUPT_DONE;
+}
+
+/*
+ * DMA engine API implementation
+ */
+
+static struct ep93xx_dma_desc *
+ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_desc *desc, *_desc;
+ struct ep93xx_dma_desc *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
+ if (async_tx_test_ack(&desc->txd)) {
+ list_del_init(&desc->node);
+
+ /* Re-initialize the descriptor */
+ desc->src_addr = 0;
+ desc->dst_addr = 0;
+ desc->size = 0;
+ desc->complete = false;
+ desc->txd.cookie = 0;
+ desc->txd.callback = NULL;
+ desc->txd.callback_param = NULL;
+
+ ret = desc;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&edmac->lock, flags);
+ return ret;
+}
+
+static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
+ struct ep93xx_dma_desc *desc)
+{
+ if (desc) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ list_splice_init(&desc->tx_list, &edmac->free_list);
+ list_add(&desc->node, &edmac->free_list);
+ spin_unlock_irqrestore(&edmac->lock, flags);
+ }
+}
+
+/**
+ * ep93xx_dma_advance_work - start processing the next pending transaction
+ * @edmac: channel
+ *
+ * If we have pending transactions queued and we are currently idling, this
+ * function takes the next queued transaction from the @edmac->queue and
+ * pushes it to the hardware for execution.
+ */
+static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_desc *new;
+ unsigned long flags;
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
+ spin_unlock_irqrestore(&edmac->lock, flags);
+ return;
+ }
+
+ /* Take the next descriptor from the pending queue */
+ new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
+ list_del_init(&new->node);
+
+ ep93xx_dma_set_active(edmac, new);
+
+ /* Push it to the hardware */
+ edmac->edma->hw_submit(edmac);
+ spin_unlock_irqrestore(&edmac->lock, flags);
+}
+
+static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
+{
+ struct device *dev = desc->txd.chan->device->dev;
+
+ if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+ dma_unmap_single(dev, desc->src_addr, desc->size,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, desc->src_addr, desc->size,
+ DMA_TO_DEVICE);
+ }
+ if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+ dma_unmap_single(dev, desc->dst_addr, desc->size,
+ DMA_FROM_DEVICE);
+ else
+ dma_unmap_page(dev, desc->dst_addr, desc->size,
+ DMA_FROM_DEVICE);
+ }
+}
+
+static void ep93xx_dma_tasklet(unsigned long data)
+{
+ struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
+ struct ep93xx_dma_desc *desc, *d;
+ dma_async_tx_callback callback;
+ void *callback_param;
+ LIST_HEAD(list);
+
+ spin_lock_irq(&edmac->lock);
+ desc = ep93xx_dma_get_active(edmac);
+ if (desc->complete) {
+ edmac->last_completed = desc->txd.cookie;
+ list_splice_init(&edmac->active, &list);
+ }
+ spin_unlock_irq(&edmac->lock);
+
+ /* Pick up the next descriptor from the queue */
+ ep93xx_dma_advance_work(edmac);
+
+ callback = desc->txd.callback;
+ callback_param = desc->txd.callback_param;
+
+ /* Now we can release all the chained descriptors */
+ list_for_each_entry_safe(desc, d, &list, node) {
+ /*
+ * For the memcpy channels the API requires us to unmap the
+ * buffers unless requested otherwise.
+ */
+ if (!edmac->chan.private)
+ ep93xx_dma_unmap_buffers(desc);
+
+ ep93xx_dma_desc_put(edmac, desc);
+ }
+
+ if (callback)
+ callback(callback_param);
+}
+
+static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
+{
+ struct ep93xx_dma_chan *edmac = dev_id;
+ irqreturn_t ret = IRQ_HANDLED;
+
+ spin_lock(&edmac->lock);
+
+ switch (edmac->edma->hw_interrupt(edmac)) {
+ case INTERRUPT_DONE:
+ ep93xx_dma_get_active(edmac)->complete = true;
+ tasklet_schedule(&edmac->tasklet);
+ break;
+
+ case INTERRUPT_NEXT_BUFFER:
+ if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
+ tasklet_schedule(&edmac->tasklet);
+ break;
+
+ default:
+ dev_warn(chan2dev(edmac), "unknown interrupt!\n");
+ ret = IRQ_NONE;
+ break;
+ }
+
+ spin_unlock(&edmac->lock);
+ return ret;
+}
+
+/**
+ * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
+ * @tx: descriptor to be executed
+ *
+ * Function will execute given descriptor on the hardware or if the hardware
+ * is busy, queue the descriptor to be executed later on. Returns cookie which
+ * can be used to poll the status of the descriptor.
+ */
+static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
+ struct ep93xx_dma_desc *desc;
+ dma_cookie_t cookie;
+ unsigned long flags;
+
+ spin_lock_irqsave(&edmac->lock, flags);
+
+ cookie = edmac->chan.cookie;
+
+ if (++cookie < 0)
+ cookie = 1;
+
+ desc = container_of(tx, struct ep93xx_dma_desc, txd);
+
+ edmac->chan.cookie = cookie;
+ desc->txd.cookie = cookie;
+
+ /*
+ * If nothing is currently being processed, we push this descriptor
+ * directly to the hardware. Otherwise we put the descriptor
+ * to the pending queue.
+ */
+ if (list_empty(&edmac->active)) {
+ ep93xx_dma_set_active(edmac, desc);
+ edmac->edma->hw_submit(edmac);
+ } else {
+ list_add_tail(&desc->node, &edmac->queue);
+ }
+
+ spin_unlock_irqrestore(&edmac->lock, flags);
+ return cookie;
+}
+
+/**
+ * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
+ * @chan: channel to allocate resources
+ *
+ * Function allocates necessary resources for the given DMA channel and
+ * returns number of allocated descriptors for the channel. Negative errno
+ * is returned in case of failure.
+ */
+static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ struct ep93xx_dma_data *data = chan->private;
+ const char *name = dma_chan_name(chan);
+ int ret, i;
+
+ /* Sanity check the channel parameters */
+ if (!edmac->edma->m2m) {
+ if (!data)
+ return -EINVAL;
+ if (data->port < EP93XX_DMA_I2S1 ||
+ data->port > EP93XX_DMA_IRDA)
+ return -EINVAL;
+ if (data->direction != ep93xx_dma_chan_direction(chan))
+ return -EINVAL;
+ } else {
+ if (data) {
+ switch (data->port) {
+ case EP93XX_DMA_SSP:
+ case EP93XX_DMA_IDE:
+ if (data->direction != DMA_TO_DEVICE &&
+ data->direction != DMA_FROM_DEVICE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (data && data->name)
+ name = data->name;
+
+ ret = clk_enable(edmac->clk);
+ if (ret)
+ return ret;
+
+ ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
+ if (ret)
+ goto fail_clk_disable;
+
+ spin_lock_irq(&edmac->lock);
+ edmac->last_completed = 1;
+ edmac->chan.cookie = 1;
+ ret = edmac->edma->hw_setup(edmac);
+ spin_unlock_irq(&edmac->lock);
+
+ if (ret)
+ goto fail_free_irq;
+
+ for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
+ struct ep93xx_dma_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "not enough descriptors\n");
+ break;
+ }
+
+ INIT_LIST_HEAD(&desc->tx_list);
+
+ dma_async_tx_descriptor_init(&desc->txd, chan);
+ desc->txd.flags = DMA_CTRL_ACK;
+ desc->txd.tx_submit = ep93xx_dma_tx_submit;
+
+ ep93xx_dma_desc_put(edmac, desc);
+ }
+
+ return i;
+
+fail_free_irq:
+ free_irq(edmac->irq, edmac);
+fail_clk_disable:
+ clk_disable(edmac->clk);
+
+ return ret;
+}
+
+/**
+ * ep93xx_dma_free_chan_resources - release resources for the channel
+ * @chan: channel
+ *
+ * Function releases all the resources allocated for the given channel.
+ * The channel must be idle when this is called.
+ */
+static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ struct ep93xx_dma_desc *desc, *d;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ BUG_ON(!list_empty(&edmac->active));
+ BUG_ON(!list_empty(&edmac->queue));
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ edmac->edma->hw_shutdown(edmac);
+ edmac->runtime_addr = 0;
+ edmac->runtime_ctrl = 0;
+ edmac->buffer = 0;
+ list_splice_init(&edmac->free_list, &list);
+ spin_unlock_irqrestore(&edmac->lock, flags);
+
+ list_for_each_entry_safe(desc, d, &list, node)
+ kfree(desc);
+
+ clk_disable(edmac->clk);
+ free_irq(edmac->irq, edmac);
+}
+
+/**
+ * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
+ * @chan: channel
+ * @dest: destination bus address
+ * @src: source bus address
+ * @len: size of the transaction
+ * @flags: flags for the descriptor
+ *
+ * Returns a valid DMA descriptor or %NULL in case of failure.
+ */
+struct dma_async_tx_descriptor *
+ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ struct ep93xx_dma_desc *desc, *first;
+ size_t bytes, offset;
+
+ first = NULL;
+ for (offset = 0; offset < len; offset += bytes) {
+ desc = ep93xx_dma_desc_get(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
+ goto fail;
+ }
+
+ bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);
+
+ desc->src_addr = src + offset;
+ desc->dst_addr = dest + offset;
+ desc->size = bytes;
+
+ if (!first)
+ first = desc;
+ else
+ list_add_tail(&desc->node, &first->tx_list);
+ }
+
+ first->txd.cookie = -EBUSY;
+ first->txd.flags = flags;
+
+ return &first->txd;
+fail:
+ ep93xx_dma_desc_put(edmac, first);
+ return NULL;
+}
+
+/**
+ * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
+ * @chan: channel
+ * @sgl: list of buffers to transfer
+ * @sg_len: number of entries in @sgl
+ * @dir: direction of the DMA transfer
+ * @flags: flags for the descriptor
+ *
+ * Returns a valid DMA descriptor or %NULL in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction dir,
+ unsigned long flags)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ struct ep93xx_dma_desc *desc, *first;
+ struct scatterlist *sg;
+ int i;
+
+ if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
+ dev_warn(chan2dev(edmac),
+ "channel was configured with different direction\n");
+ return NULL;
+ }
+
+ if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
+ dev_warn(chan2dev(edmac),
+ "channel is already used for cyclic transfers\n");
+ return NULL;
+ }
+
+ first = NULL;
+ for_each_sg(sgl, sg, sg_len, i) {
+ size_t sg_len = sg_dma_len(sg);
+
+ if (sg_len > DMA_MAX_CHAN_BYTES) {
+ dev_warn(chan2dev(edmac), "too big transfer size %d\n",
+ sg_len);
+ goto fail;
+ }
+
+ desc = ep93xx_dma_desc_get(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
+ goto fail;
+ }
+
+ if (dir == DMA_TO_DEVICE) {
+ desc->src_addr = sg_dma_address(sg);
+ desc->dst_addr = edmac->runtime_addr;
+ } else {
+ desc->src_addr = edmac->runtime_addr;
+ desc->dst_addr = sg_dma_address(sg);
+ }
+ desc->size = sg_len;
+
+ if (!first)
+ first = desc;
+ else
+ list_add_tail(&desc->node, &first->tx_list);
+ }
+
+ first->txd.cookie = -EBUSY;
+ first->txd.flags = flags;
+
+ return &first->txd;
+
+fail:
+ ep93xx_dma_desc_put(edmac, first);
+ return NULL;
+}
+
+/**
+ * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
+ * @chan: channel
+ * @dma_addr: DMA mapped address of the buffer
+ * @buf_len: length of the buffer (in bytes)
+ * @period_len: length of a single period
+ * @dir: direction of the operation
+ *
+ * Prepares a descriptor for cyclic DMA operation. This means that once the
+ * descriptor is submitted, we will be submitting @period_len sized
+ * buffers and calling the callback once the period has elapsed. Transfer
+ * terminates only when client calls dmaengine_terminate_all() for this
+ * channel.
+ *
+ * Returns a valid DMA descriptor or %NULL in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_data_direction dir)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ struct ep93xx_dma_desc *desc, *first;
+ size_t offset = 0;
+
+ if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
+ dev_warn(chan2dev(edmac),
+ "channel was configured with different direction\n");
+ return NULL;
+ }
+
+ if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
+ dev_warn(chan2dev(edmac),
+ "channel is already used for cyclic transfers\n");
+ return NULL;
+ }
+
+ if (period_len > DMA_MAX_CHAN_BYTES) {
+ dev_warn(chan2dev(edmac), "too big period length %d\n",
+ period_len);
+ return NULL;
+ }
+
+ /* Split the buffer into period size chunks */
+ first = NULL;
+ for (offset = 0; offset < buf_len; offset += period_len) {
+ desc = ep93xx_dma_desc_get(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
+ goto fail;
+ }
+
+ if (dir == DMA_TO_DEVICE) {
+ desc->src_addr = dma_addr + offset;
+ desc->dst_addr = edmac->runtime_addr;
+ } else {
+ desc->src_addr = edmac->runtime_addr;
+ desc->dst_addr = dma_addr + offset;
+ }
+
+ desc->size = period_len;
+
+ if (!first)
+ first = desc;
+ else
+ list_add_tail(&desc->node, &first->tx_list);
+ }
+
+ first->txd.cookie = -EBUSY;
+
+ return &first->txd;
+
+fail:
+ ep93xx_dma_desc_put(edmac, first);
+ return NULL;
+}
+
+/**
+ * ep93xx_dma_terminate_all - terminate all transactions
+ * @edmac: channel
+ *
+ * Stops all DMA transactions. All descriptors are put back to the
+ * @edmac->free_list and callbacks are _not_ called.
+ */
+static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_desc *desc, *_d;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ /* First we disable and flush the DMA channel */
+ edmac->edma->hw_shutdown(edmac);
+ clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
+ list_splice_init(&edmac->active, &list);
+ list_splice_init(&edmac->queue, &list);
+ /*
+ * We then re-enable the channel. This way we can continue submitting
+ * the descriptors by just calling ->hw_submit() again.
+ */
+ edmac->edma->hw_setup(edmac);
+ spin_unlock_irqrestore(&edmac->lock, flags);
+
+ list_for_each_entry_safe(desc, _d, &list, node)
+ ep93xx_dma_desc_put(edmac, desc);
+
+ return 0;
+}
+
+static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
+ struct dma_slave_config *config)
+{
+ enum dma_slave_buswidth width;
+ unsigned long flags;
+ u32 addr, ctrl;
+
+ if (!edmac->edma->m2m)
+ return -EINVAL;
+
+ switch (config->direction) {
+ case DMA_FROM_DEVICE:
+ width = config->src_addr_width;
+ addr = config->src_addr;
+ break;
+
+ case DMA_TO_DEVICE:
+ width = config->dst_addr_width;
+ addr = config->dst_addr;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ ctrl = 0;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ ctrl = M2M_CONTROL_PW_16;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ ctrl = M2M_CONTROL_PW_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ edmac->runtime_addr = addr;
+ edmac->runtime_ctrl = ctrl;
+ spin_unlock_irqrestore(&edmac->lock, flags);
+
+ return 0;
+}
+
+/**
+ * ep93xx_dma_control - manipulate all pending operations on a channel
+ * @chan: channel
+ * @cmd: control command to perform
+ * @arg: optional argument
+ *
+ * Controls the channel. Function returns %0 in case of success or negative
+ * error in case of failure.
+ */
+static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ struct dma_slave_config *config;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ return ep93xx_dma_terminate_all(edmac);
+
+ case DMA_SLAVE_CONFIG:
+ config = (struct dma_slave_config *)arg;
+ return ep93xx_dma_slave_config(edmac, config);
+
+ default:
+ break;
+ }
+
+ return -ENOSYS;
+}
+
+/**
+ * ep93xx_dma_tx_status - check if a transaction is completed
+ * @chan: channel
+ * @cookie: transaction specific cookie
+ * @state: state of the transaction is stored here if given
+ *
+ * This function can be used to query state of a given transaction.
+ */
+static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ dma_cookie_t last_used, last_completed;
+ enum dma_status ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ last_used = chan->cookie;
+ last_completed = edmac->last_completed;
+ spin_unlock_irqrestore(&edmac->lock, flags);
+
+ ret = dma_async_is_complete(cookie, last_completed, last_used);
+ dma_set_tx_state(state, last_completed, last_used, 0);
+
+ return ret;
+}
+
+/**
+ * ep93xx_dma_issue_pending - push pending transactions to the hardware
+ * @chan: channel
+ *
+ * When this function is called, all pending transactions are pushed to the
+ * hardware and executed.
+ */
+static void ep93xx_dma_issue_pending(struct dma_chan *chan)
+{
+ ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
+}
+
+static int __init ep93xx_dma_probe(struct platform_device *pdev)
+{
+ struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct ep93xx_dma_engine *edma;
+ struct dma_device *dma_dev;
+ size_t edma_size;
+ int ret, i;
+
+ edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
+ edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
+ if (!edma)
+ return -ENOMEM;
+
+ dma_dev = &edma->dma_dev;
+ edma->m2m = platform_get_device_id(pdev)->driver_data;
+ edma->num_channels = pdata->num_channels;
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+ for (i = 0; i < pdata->num_channels; i++) {
+ const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
+ struct ep93xx_dma_chan *edmac = &edma->channels[i];
+
+ edmac->chan.device = dma_dev;
+ edmac->regs = cdata->base;
+ edmac->irq = cdata->irq;
+ edmac->edma = edma;
+
+ edmac->clk = clk_get(NULL, cdata->name);
+ if (IS_ERR(edmac->clk)) {
+ dev_warn(&pdev->dev, "failed to get clock for %s\n",
+ cdata->name);
+ continue;
+ }
+
+ spin_lock_init(&edmac->lock);
+ INIT_LIST_HEAD(&edmac->active);
+ INIT_LIST_HEAD(&edmac->queue);
+ INIT_LIST_HEAD(&edmac->free_list);
+ tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
+ (unsigned long)edmac);
+
+ list_add_tail(&edmac->chan.device_node,
+ &dma_dev->channels);
+ }
+
+ dma_cap_zero(dma_dev->cap_mask);
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
+
+ dma_dev->dev = &pdev->dev;
+ dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
+ dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
+ dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
+ dma_dev->device_control = ep93xx_dma_control;
+ dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
+ dma_dev->device_tx_status = ep93xx_dma_tx_status;
+
+ dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);
+
+ if (edma->m2m) {
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;
+
+ edma->hw_setup = m2m_hw_setup;
+ edma->hw_shutdown = m2m_hw_shutdown;
+ edma->hw_submit = m2m_hw_submit;
+ edma->hw_interrupt = m2m_hw_interrupt;
+ } else {
+ dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+
+ edma->hw_setup = m2p_hw_setup;
+ edma->hw_shutdown = m2p_hw_shutdown;
+ edma->hw_submit = m2p_hw_submit;
+ edma->hw_interrupt = m2p_hw_interrupt;
+ }
+
+ ret = dma_async_device_register(dma_dev);
+ if (unlikely(ret)) {
+ for (i = 0; i < edma->num_channels; i++) {
+ struct ep93xx_dma_chan *edmac = &edma->channels[i];
+ if (!IS_ERR_OR_NULL(edmac->clk))
+ clk_put(edmac->clk);
+ }
+ kfree(edma);
+ } else {
+ dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
+ edma->m2m ? "M" : "P");
+ }
+
+ return ret;
+}
+
+static struct platform_device_id ep93xx_dma_driver_ids[] = {
+ { "ep93xx-dma-m2p", 0 },
+ { "ep93xx-dma-m2m", 1 },
+ { },
+};
+
+static struct platform_driver ep93xx_dma_driver = {
+ .driver = {
+ .name = "ep93xx-dma",
+ },
+ .id_table = ep93xx_dma_driver_ids,
+};
+
+static int __init ep93xx_dma_module_init(void)
+{
+ return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
+}
+subsys_initcall(ep93xx_dma_module_init);
+
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
+MODULE_DESCRIPTION("EP93xx DMA driver");
+MODULE_LICENSE("GPL");
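For reference, here is a minimal client-side sketch of how a peripheral driver might claim one of the new EP93xx DMA channels through the generic dmaengine API. It is an illustrative assumption rather than part of this patch: the filter function and the request helper below are hypothetical, and only the dmaengine core calls plus the port/direction/name fields of struct ep93xx_dma_data documented above come from the driver itself.

#include <linux/dmaengine.h>
#include <mach/dma.h>

/*
 * Hypothetical filter, not part of this patch: accept the offered channel
 * and attach the channel data so that ep93xx_dma_alloc_chan_resources()
 * can validate the requested port and direction.
 */
static bool ep93xx_dma_filter(struct dma_chan *chan, void *filter_param)
{
	chan->private = filter_param;
	return true;
}

static struct dma_chan *ep93xx_dma_request(struct ep93xx_dma_data *data)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* data->port, data->direction and data->name describe the channel */
	return dma_request_channel(mask, ep93xx_dma_filter, data);
}

The peripheral address and bus width would then be set through dmaengine_slave_config(), which ends up in ep93xx_dma_slave_config() above before slave transfers are prepared.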
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 636e409..2a638f9 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -343,7 +343,7 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
dmae_set_dmars(sh_chan, cfg->mid_rid);
dmae_set_chcr(sh_chan, cfg->chcr);
- } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
+ } else {
dmae_init(sh_chan);
}
@@ -1144,6 +1144,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
/* platform data */
shdev->pdata = pdata;
+ platform_set_drvdata(pdev, shdev);
+
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
@@ -1256,7 +1258,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
- platform_set_drvdata(pdev, shdev);
dma_async_device_register(&shdev->common);
return err;
@@ -1278,6 +1279,8 @@ rst_err:
if (dmars)
iounmap(shdev->dmars);
+
+ platform_set_drvdata(pdev, NULL);
emapdmars:
iounmap(shdev->chan_reg);
synchronize_rcu();
@@ -1316,6 +1319,8 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
iounmap(shdev->dmars);
iounmap(shdev->chan_reg);
+ platform_set_drvdata(pdev, NULL);
+
synchronize_rcu();
kfree(shdev);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index de3d246..85e9379 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -296,7 +296,7 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
* If the TjMax is not plausible, an assumption
* will be used
*/
- if (val > 80 && val < 120) {
+ if (val) {
dev_info(dev, "TjMax is %d C.\n", val);
return val * 1000;
}
@@ -304,24 +304,9 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
/*
* An assumption is made for early CPUs and unreadable MSR.
- * NOTE: the given value may not be correct.
+ * NOTE: the calculated value may not be correct.
*/
-
- switch (c->x86_model) {
- case 0xe:
- case 0xf:
- case 0x16:
- case 0x1a:
- dev_warn(dev, "TjMax is assumed as 100 C!\n");
- return 100000;
- case 0x17:
- case 0x1c: /* Atom CPUs */
- return adjust_tjmax(c, id, dev);
- default:
- dev_warn(dev, "CPU (model=0x%x) is not supported yet,"
- " using default TjMax of 100C.\n", c->x86_model);
- return 100000;
- }
+ return adjust_tjmax(c, id, dev);
}
static void __devinit get_ucode_rev_on_cpu(void *edx)
@@ -341,7 +326,7 @@ static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
if (!err) {
val = (eax >> 16) & 0xff;
- if (val > 80 && val < 120)
+ if (val)
return val * 1000;
}
dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu);
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 0f9fc40..e855d3b 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -136,15 +136,29 @@ static int max6642_detect(struct i2c_client *client,
if (man_id != 0x4D)
return -ENODEV;
+ /* sanity check */
+ if (i2c_smbus_read_byte_data(client, 0x04) != 0x4D
+ || i2c_smbus_read_byte_data(client, 0x06) != 0x4D
+ || i2c_smbus_read_byte_data(client, 0xff) != 0x4D)
+ return -ENODEV;
+
/*
* We read the config and status register, the 4 lower bits in the
* config register should be zero and bit 5, 3, 1 and 0 should be
* zero in the status register.
*/
reg_config = i2c_smbus_read_byte_data(client, MAX6642_REG_R_CONFIG);
+ if ((reg_config & 0x0f) != 0x00)
+ return -ENODEV;
+
+ /* in between, another round of sanity checks */
+ if (i2c_smbus_read_byte_data(client, 0x04) != reg_config
+ || i2c_smbus_read_byte_data(client, 0x06) != reg_config
+ || i2c_smbus_read_byte_data(client, 0xff) != reg_config)
+ return -ENODEV;
+
reg_status = i2c_smbus_read_byte_data(client, MAX6642_REG_R_STATUS);
- if (((reg_config & 0x0f) != 0x00) ||
- ((reg_status & 0x2b) != 0x00))
+ if ((reg_status & 0x2b) != 0x00)
return -ENODEV;
strlcpy(info->type, "max6642", I2C_NAME_SIZE);
@@ -246,7 +260,7 @@ static SENSOR_DEVICE_ATTR_2(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
set_temp_max, 0, MAX6642_REG_W_LOCAL_HIGH);
static SENSOR_DEVICE_ATTR_2(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
set_temp_max, 1, MAX6642_REG_W_REMOTE_HIGH);
-static SENSOR_DEVICE_ATTR(temp_fault, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
@@ -256,7 +270,7 @@ static struct attribute *max6642_attributes[] = {
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
- &sensor_dev_attr_temp_fault.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
NULL
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index f369896..8755f5f 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -120,21 +120,17 @@ static void serport_ldisc_close(struct tty_struct *tty)
* 'interrupt' routine.
*/
-static unsigned int serport_ldisc_receive(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
{
struct serport *serport = (struct serport*) tty->disc_data;
unsigned long flags;
unsigned int ch_flags;
- int ret = 0;
int i;
spin_lock_irqsave(&serport->lock, flags);
- if (!test_bit(SERPORT_ACTIVE, &serport->flags)) {
- ret = -EINVAL;
+ if (!test_bit(SERPORT_ACTIVE, &serport->flags))
goto out;
- }
for (i = 0; i < count; i++) {
switch (fp[i]) {
@@ -156,8 +152,6 @@ static unsigned int serport_ldisc_receive(struct tty_struct *tty,
out:
spin_unlock_irqrestore(&serport->lock, flags);
-
- return ret == 0 ? count : ret;
}
/*
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 1d44d47..86a5c4f 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -674,7 +674,7 @@ gigaset_tty_ioctl(struct tty_struct *tty, struct file *file,
* cflags buffer containing error flags for received characters (ignored)
* count number of received characters
*/
-static unsigned int
+static void
gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
char *cflags, int count)
{
@@ -683,12 +683,12 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
struct inbuf_t *inbuf;
if (!cs)
- return -ENODEV;
+ return;
inbuf = cs->inbuf;
if (!inbuf) {
dev_err(cs->dev, "%s: no inbuf\n", __func__);
cs_put(cs);
- return -EINVAL;
+ return;
}
tail = inbuf->tail;
@@ -725,8 +725,6 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
gigaset_schedule_event(cs);
cs_put(cs);
-
- return count;
}
/*
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 1a05fe0..f91f82e 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -747,8 +747,8 @@ static void st_tty_close(struct tty_struct *tty)
pr_debug("%s: done ", __func__);
}
-static unsigned int st_tty_receive(struct tty_struct *tty,
- const unsigned char *data, char *tty_flags, int count)
+static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
+ char *tty_flags, int count)
{
#ifdef VERBOSE
print_hex_dump(KERN_DEBUG, ">in>", DUMP_PREFIX_NONE,
@@ -761,8 +761,6 @@ static unsigned int st_tty_receive(struct tty_struct *tty,
*/
st_recv(tty->disc_data, data, count);
pr_debug("done %s", __func__);
-
- return count;
}
/* wake-up function called in from the TTY layer
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 5f25889..44b28b2 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -185,7 +185,7 @@ static int max_interrupt_work = 10;
static int nopnp;
#endif
-static int el3_common_init(struct net_device *dev);
+static int __devinit el3_common_init(struct net_device *dev);
static void el3_common_remove(struct net_device *dev);
static ushort id_read_eeprom(int index);
static ushort read_eeprom(int ioaddr, int index);
@@ -395,7 +395,7 @@ static struct isa_driver el3_isa_driver = {
static int isa_registered;
#ifdef CONFIG_PNP
-static const struct pnp_device_id el3_pnp_ids[] __devinitconst = {
+static struct pnp_device_id el3_pnp_ids[] = {
{ .id = "TCM5090" }, /* 3Com Etherlink III (TP) */
{ .id = "TCM5091" }, /* 3Com Etherlink III */
{ .id = "TCM5094" }, /* 3Com Etherlink III (combo) */
@@ -478,7 +478,7 @@ static int pnp_registered;
#endif /* CONFIG_PNP */
#ifdef CONFIG_EISA
-static const struct eisa_device_id el3_eisa_ids[] __devinitconst = {
+static struct eisa_device_id el3_eisa_ids[] = {
{ "TCM5090" },
{ "TCM5091" },
{ "TCM5092" },
@@ -508,7 +508,7 @@ static int eisa_registered;
#ifdef CONFIG_MCA
static int el3_mca_probe(struct device *dev);
-static const short el3_mca_adapter_ids[] __devinitconst = {
+static short el3_mca_adapter_ids[] __initdata = {
0x627c,
0x627d,
0x62db,
@@ -517,7 +517,7 @@ static const short el3_mca_adapter_ids[] __devinitconst = {
0x0000
};
-static const char *const el3_mca_adapter_names[] __devinitconst = {
+static char *el3_mca_adapter_names[] __initdata = {
"3Com 3c529 EtherLink III (10base2)",
"3Com 3c529 EtherLink III (10baseT)",
"3Com 3c529 EtherLink III (test mode)",
@@ -601,7 +601,7 @@ static void el3_common_remove (struct net_device *dev)
}
#ifdef CONFIG_MCA
-static int __devinit el3_mca_probe(struct device *device)
+static int __init el3_mca_probe(struct device *device)
{
/* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch,
* heavily modified by Chris Beauregard
@@ -671,7 +671,7 @@ static int __devinit el3_mca_probe(struct device *device)
#endif /* CONFIG_MCA */
#ifdef CONFIG_EISA
-static int __devinit el3_eisa_probe (struct device *device)
+static int __init el3_eisa_probe (struct device *device)
{
short i;
int ioaddr, irq, if_port;
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 99f43d2..8cc2256 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -901,14 +901,14 @@ static const struct dev_pm_ops vortex_pm_ops = {
#endif /* !CONFIG_PM */
#ifdef CONFIG_EISA
-static const struct eisa_device_id vortex_eisa_ids[] __devinitconst = {
+static struct eisa_device_id vortex_eisa_ids[] = {
{ "TCM5920", CH_3C592 },
{ "TCM5970", CH_3C597 },
{ "" }
};
MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
-static int __devinit vortex_eisa_probe(struct device *device)
+static int __init vortex_eisa_probe(struct device *device)
{
void __iomem *ioaddr;
struct eisa_device *edev;
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 73c7e03..3df0c0f 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -167,8 +167,8 @@ static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
#endif
-static unsigned int ldisc_receive(struct tty_struct *tty,
- const u8 *data, char *flags, int count)
+static void ldisc_receive(struct tty_struct *tty, const u8 *data,
+ char *flags, int count)
{
struct sk_buff *skb = NULL;
struct ser_device *ser;
@@ -215,8 +215,6 @@ static unsigned int ldisc_receive(struct tty_struct *tty,
} else
++ser->dev->stats.rx_dropped;
update_tty_status(ser);
-
- return count;
}
static int handle_tx(struct ser_device *ser)
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index d499056..1767811 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -923,7 +923,7 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
mem_size = resource_size(mem);
if (!request_mem_region(mem->start, mem_size, pdev->name)) {
err = -EBUSY;
- goto failed_req;
+ goto failed_get;
}
base = ioremap(mem->start, mem_size);
@@ -977,9 +977,8 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
iounmap(base);
failed_map:
release_mem_region(mem->start, mem_size);
- failed_req:
- clk_put(clk);
failed_get:
+ clk_put(clk);
failed_clock:
return err;
}
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 75622d5..1b49df6 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -425,17 +425,16 @@ static void slc_setup(struct net_device *dev)
* in parallel
*/
-static unsigned int slcan_receive_buf(struct tty_struct *tty,
+static void slcan_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct slcan *sl = (struct slcan *) tty->disc_data;
- int bytes = count;
if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
- return -ENODEV;
+ return;
/* Read the characters out of the buffer */
- while (bytes--) {
+ while (count--) {
if (fp && *fp++) {
if (!test_and_set_bit(SLF_ERROR, &sl->flags))
sl->dev->stats.rx_errors++;
@@ -444,8 +443,6 @@ static unsigned int slcan_receive_buf(struct tty_struct *tty,
}
slcan_unesc(sl, *cp++);
}
-
- return count;
}
/************************************
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 29a4f06..dcc4a17 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1781,8 +1781,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
ndev = alloc_etherdev(sizeof(struct emac_priv));
if (!ndev) {
dev_err(&pdev->dev, "error allocating net_device\n");
- clk_put(emac_clk);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto free_clk;
}
platform_set_drvdata(pdev, ndev);
@@ -1796,7 +1796,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
if (!pdata) {
dev_err(&pdev->dev, "no platform data\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto probe_quit;
}
/* MAC addr and PHY mask , RMII enable info from platform_data */
@@ -1929,8 +1930,9 @@ no_dma:
iounmap(priv->remap_addr);
probe_quit:
- clk_put(emac_clk);
free_netdev(ndev);
+free_clk:
+ clk_put(emac_clk);
return rc;
}
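The davinci_emac and flexcan hunks both straighten out probe() error paths so that resources are released in reverse order of acquisition, each step behind its own label. A small standalone sketch of that unwind pattern follows; acquire_clock(), alloc_device() and friends are made-up stand-ins, not driver APIs.

/* Hypothetical, standalone illustration of the unwind ordering the patches
 * enforce: release in reverse order of acquisition, one label per step.
 */
#include <stdio.h>
#include <stdlib.h>

static void *acquire_clock(void)   { return malloc(1); }
static void release_clock(void *c) { free(c); }
static void *alloc_device(void)    { return malloc(1); }
static void free_device(void *d)   { free(d); }

static int demo_probe(void)
{
	void *clk, *dev;
	int rc;

	clk = acquire_clock();
	if (!clk)
		return -1;

	dev = alloc_device();
	if (!dev) {
		rc = -1;
		goto err_free_clk;	/* only the clock is held here */
	}

	/* ... further setup would go here ... */
	free_device(dev);
	release_clock(clk);
	return 0;

err_free_clk:
	release_clock(clk);		/* reverse order of acquisition */
	return rc;
}

int main(void)
{
	printf("probe: %d\n", demo_probe());
	return 0;
}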
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 1765405..8b0084d 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -331,18 +331,18 @@ static struct {
"DE422",\
""}
-static const char* const depca_signature[] __devinitconst = DEPCA_SIGNATURE;
+static char* __initdata depca_signature[] = DEPCA_SIGNATURE;
enum depca_type {
DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown
};
-static const char depca_string[] = "depca";
+static char depca_string[] = "depca";
static int depca_device_remove (struct device *device);
#ifdef CONFIG_EISA
-static const struct eisa_device_id depca_eisa_ids[] __devinitconst = {
+static struct eisa_device_id depca_eisa_ids[] = {
{ "DEC4220", de422 },
{ "" }
};
@@ -367,19 +367,19 @@ static struct eisa_driver depca_eisa_driver = {
#define DE210_ID 0x628d
#define DE212_ID 0x6def
-static const short depca_mca_adapter_ids[] __devinitconst = {
+static short depca_mca_adapter_ids[] = {
DE210_ID,
DE212_ID,
0x0000
};
-static const char *depca_mca_adapter_name[] = {
+static char *depca_mca_adapter_name[] = {
"DEC EtherWORKS MC Adapter (DE210)",
"DEC EtherWORKS MC Adapter (DE212)",
NULL
};
-static const enum depca_type depca_mca_adapter_type[] = {
+static enum depca_type depca_mca_adapter_type[] = {
de210,
de212,
0
@@ -541,9 +541,10 @@ static void SetMulticastFilter(struct net_device *dev);
static int load_packet(struct net_device *dev, struct sk_buff *skb);
static void depca_dbg_open(struct net_device *dev);
-static const u_char de1xx_irq[] __devinitconst = { 2, 3, 4, 5, 7, 9, 0 };
-static const u_char de2xx_irq[] __devinitconst = { 5, 9, 10, 11, 15, 0 };
-static const u_char de422_irq[] __devinitconst = { 5, 9, 10, 11, 0 };
+static u_char de1xx_irq[] __initdata = { 2, 3, 4, 5, 7, 9, 0 };
+static u_char de2xx_irq[] __initdata = { 5, 9, 10, 11, 15, 0 };
+static u_char de422_irq[] __initdata = { 5, 9, 10, 11, 0 };
+static u_char *depca_irq;
static int irq;
static int io;
@@ -579,7 +580,7 @@ static const struct net_device_ops depca_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit depca_hw_init (struct net_device *dev, struct device *device)
+static int __init depca_hw_init (struct net_device *dev, struct device *device)
{
struct depca_private *lp;
int i, j, offset, netRAM, mem_len, status = 0;
@@ -747,7 +748,6 @@ static int __devinit depca_hw_init (struct net_device *dev, struct device *devic
if (dev->irq < 2) {
unsigned char irqnum;
unsigned long irq_mask, delay;
- const u_char *depca_irq;
irq_mask = probe_irq_on();
@@ -770,7 +770,6 @@ static int __devinit depca_hw_init (struct net_device *dev, struct device *devic
break;
default:
- depca_irq = NULL;
break; /* Not reached */
}
@@ -1303,7 +1302,7 @@ static void SetMulticastFilter(struct net_device *dev)
}
}
-static int __devinit depca_common_init (u_long ioaddr, struct net_device **devp)
+static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
{
int status = 0;
@@ -1334,7 +1333,7 @@ static int __devinit depca_common_init (u_long ioaddr, struct net_device **devp)
/*
** Microchannel bus I/O device probe
*/
-static int __devinit depca_mca_probe(struct device *device)
+static int __init depca_mca_probe(struct device *device)
{
unsigned char pos[2];
unsigned char where;
@@ -1458,7 +1457,7 @@ static int __devinit depca_mca_probe(struct device *device)
** ISA bus I/O device probe
*/
-static void __devinit depca_platform_probe (void)
+static void __init depca_platform_probe (void)
{
int i;
struct platform_device *pldev;
@@ -1498,7 +1497,7 @@ static void __devinit depca_platform_probe (void)
}
}
-static enum depca_type __devinit depca_shmem_probe (ulong *mem_start)
+static enum depca_type __init depca_shmem_probe (ulong *mem_start)
{
u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES;
enum depca_type adapter = unknown;
@@ -1559,7 +1558,7 @@ static int __devinit depca_isa_probe (struct platform_device *device)
*/
#ifdef CONFIG_EISA
-static int __devinit depca_eisa_probe (struct device *device)
+static int __init depca_eisa_probe (struct device *device)
{
enum depca_type adapter = unknown;
struct eisa_device *edev;
@@ -1630,7 +1629,7 @@ static int __devexit depca_device_remove (struct device *device)
** and Boot (readb) ROM. This will also give us a clue to the network RAM
** base address.
*/
-static int __devinit DepcaSignature(char *name, u_long base_addr)
+static int __init DepcaSignature(char *name, u_long base_addr)
{
u_int i, j, k;
void __iomem *ptr;
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index fbaff35..ee597e6 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1157,9 +1157,6 @@ dm9000_open(struct net_device *dev)
irqflags |= IRQF_SHARED;
- if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
- return -EAGAIN;
-
/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
mdelay(1); /* delay needs by DM9000B */
@@ -1168,6 +1165,9 @@ dm9000_open(struct net_device *dev)
dm9000_reset(db);
dm9000_init_dm9000(dev);
+ if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
+ return -EAGAIN;
+
/* Init driver variable */
db->dbug_cnt = 0;
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 9920896..3e5d0b6 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -456,7 +456,7 @@ out:
* a block of 6pack data has been received, which can now be decapsulated
* and sent on to some IP layer for further processing.
*/
-static unsigned int sixpack_receive_buf(struct tty_struct *tty,
+static void sixpack_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct sixpack *sp;
@@ -464,11 +464,11 @@ static unsigned int sixpack_receive_buf(struct tty_struct *tty,
int count1;
if (!count)
- return 0;
+ return;
sp = sp_get(tty);
if (!sp)
- return -ENODEV;
+ return;
memcpy(buf, cp, count < sizeof(buf) ? count : sizeof(buf));
@@ -487,8 +487,6 @@ static unsigned int sixpack_receive_buf(struct tty_struct *tty,
sp_put(sp);
tty_unthrottle(tty);
-
- return count1;
}
/*
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 0e4f235..4c62839 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -923,14 +923,13 @@ static long mkiss_compat_ioctl(struct tty_struct *tty, struct file *file,
* a block of data has been received, which can now be decapsulated
* and sent on to the AX.25 layer for further processing.
*/
-static unsigned int mkiss_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
struct mkiss *ax = mkiss_get(tty);
- int bytes = count;
if (!ax)
- return -ENODEV;
+ return;
/*
* Argh! mtu change time! - costs us the packet part received
@@ -940,7 +939,7 @@ static unsigned int mkiss_receive_buf(struct tty_struct *tty,
ax_changedmtu(ax);
/* Read the characters out of the buffer */
- while (bytes--) {
+ while (count--) {
if (fp != NULL && *fp++) {
if (!test_and_set_bit(AXF_ERROR, &ax->flags))
ax->dev->stats.rx_errors++;
@@ -953,8 +952,6 @@ static unsigned int mkiss_receive_buf(struct tty_struct *tty,
mkiss_put(ax);
tty_unthrottle(tty);
-
- return count;
}
/*
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index c52a1df..8e10d2f 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -188,14 +188,14 @@ struct hp100_private {
* variables
*/
#ifdef CONFIG_ISA
-static const char *const hp100_isa_tbl[] __devinitconst = {
+static const char *hp100_isa_tbl[] = {
"HWPF150", /* HP J2573 rev A */
"HWP1950", /* HP J2573 */
};
#endif
#ifdef CONFIG_EISA
-static const struct eisa_device_id hp100_eisa_tbl[] __devinitconst = {
+static struct eisa_device_id hp100_eisa_tbl[] = {
{ "HWPF180" }, /* HP J2577 rev A */
{ "HWP1920" }, /* HP 27248B */
{ "HWP1940" }, /* HP J2577 */
@@ -336,7 +336,7 @@ static __devinit const char *hp100_read_id(int ioaddr)
}
#ifdef CONFIG_ISA
-static __devinit int hp100_isa_probe1(struct net_device *dev, int ioaddr)
+static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr)
{
const char *sig;
int i;
@@ -372,7 +372,7 @@ static __devinit int hp100_isa_probe1(struct net_device *dev, int ioaddr)
* EISA and PCI are handled by device infrastructure.
*/
-static int __devinit hp100_isa_probe(struct net_device *dev, int addr)
+static int __init hp100_isa_probe(struct net_device *dev, int addr)
{
int err = -ENODEV;
@@ -396,7 +396,7 @@ static int __devinit hp100_isa_probe(struct net_device *dev, int addr)
#endif /* CONFIG_ISA */
#if !defined(MODULE) && defined(CONFIG_ISA)
-struct net_device * __devinit hp100_probe(int unit)
+struct net_device * __init hp100_probe(int unit)
{
struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
int err;
@@ -2843,7 +2843,7 @@ static void cleanup_dev(struct net_device *d)
}
#ifdef CONFIG_EISA
-static int __devinit hp100_eisa_probe (struct device *gendev)
+static int __init hp100_eisa_probe (struct device *gendev)
{
struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
struct eisa_device *edev = to_eisa_device(gendev);
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 136d754..a7d6cad 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -895,12 +895,12 @@ static int ibmlana_irq;
static int ibmlana_io;
static int startslot; /* counts through slots when probing multiple devices */
-static const short ibmlana_adapter_ids[] __devinitconst = {
+static short ibmlana_adapter_ids[] __initdata = {
IBM_LANA_ID,
0x0000
};
-static const char *const ibmlana_adapter_names[] __devinitconst = {
+static char *ibmlana_adapter_names[] __devinitdata = {
"IBM LAN Adapter/A",
NULL
};
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 035861d..3352b24 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -216,23 +216,23 @@ static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t
* usbserial: urb-complete-interrupt / softint
*/
-static unsigned int irtty_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
struct sir_dev *dev;
struct sirtty_cb *priv = tty->disc_data;
int i;
- IRDA_ASSERT(priv != NULL, return -ENODEV;);
- IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EINVAL;);
+ IRDA_ASSERT(priv != NULL, return;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
if (unlikely(count==0)) /* yes, this happens */
- return 0;
+ return;
dev = priv->dev;
if (!dev) {
IRDA_WARNING("%s(), not ready yet!\n", __func__);
- return -ENODEV;
+ return;
}
for (i = 0; i < count; i++) {
@@ -242,13 +242,11 @@ static unsigned int irtty_receive_buf(struct tty_struct *tty,
if (fp && *fp++) {
IRDA_DEBUG(0, "Framing or parity error!\n");
sirdev_receive(dev, NULL, 0); /* notify sir_dev (updating stats) */
- return -EINVAL;
+ return;
}
}
sirdev_receive(dev, cp, count);
-
- return count;
}
/*
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 69b5707..8800e1f 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -222,19 +222,19 @@ static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 s
static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self);
/* Probing */
-static int smsc_ircc_look_for_chips(void);
-static const struct smsc_chip * smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type);
-static int smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
-static int smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
-static int smsc_superio_fdc(unsigned short cfg_base);
-static int smsc_superio_lpc(unsigned short cfg_base);
+static int __init smsc_ircc_look_for_chips(void);
+static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type);
+static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
+static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
+static int __init smsc_superio_fdc(unsigned short cfg_base);
+static int __init smsc_superio_lpc(unsigned short cfg_base);
#ifdef CONFIG_PCI
-static int preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf);
-static int preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
-static void preconfigure_ali_port(struct pci_dev *dev,
+static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf);
+static int __init preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
+static void __init preconfigure_ali_port(struct pci_dev *dev,
unsigned short port);
-static int preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
-static int smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
+static int __init preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
+static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
unsigned short ircc_fir,
unsigned short ircc_sir,
unsigned char ircc_dma,
@@ -366,7 +366,7 @@ static inline void register_bank(int iobase, int bank)
}
/* PNP hotplug support */
-static const struct pnp_device_id smsc_ircc_pnp_table[] __devinitconst = {
+static const struct pnp_device_id smsc_ircc_pnp_table[] = {
{ .id = "SMCf010", .driver_data = 0 },
/* and presumably others */
{ }
@@ -515,7 +515,7 @@ static const struct net_device_ops smsc_ircc_netdev_ops = {
* Try to open driver instance
*
*/
-static int __devinit smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
+static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
{
struct smsc_ircc_cb *self;
struct net_device *dev;
@@ -2273,7 +2273,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
}
-static int __devinit smsc_access(unsigned short cfg_base, unsigned char reg)
+static int __init smsc_access(unsigned short cfg_base, unsigned char reg)
{
IRDA_DEBUG(1, "%s\n", __func__);
@@ -2281,7 +2281,7 @@ static int __devinit smsc_access(unsigned short cfg_base, unsigned char reg)
return inb(cfg_base) != reg ? -1 : 0;
}
-static const struct smsc_chip * __devinit smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type)
+static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type)
{
u8 devid, xdevid, rev;
@@ -2406,7 +2406,7 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
#ifdef CONFIG_PCI
#define PCIID_VENDOR_INTEL 0x8086
#define PCIID_VENDOR_ALI 0x10b9
-static const struct smsc_ircc_subsystem_configuration subsystem_configurations[] __devinitconst = {
+static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = {
/*
* Subsystems needing entries:
* 0x10b9:0x1533 0x103c:0x0850 HP nx9010 family
@@ -2532,7 +2532,7 @@ static const struct smsc_ircc_subsystem_configuration subsystem_configurations[]
* (FIR port, SIR port, FIR DMA, FIR IRQ)
* through the chip configuration port.
*/
-static int __devinit preconfigure_smsc_chip(struct
+static int __init preconfigure_smsc_chip(struct
smsc_ircc_subsystem_configuration
*conf)
{
@@ -2633,7 +2633,7 @@ static int __devinit preconfigure_smsc_chip(struct
* or Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge.
* They all work the same way!
*/
-static int __devinit preconfigure_through_82801(struct pci_dev *dev,
+static int __init preconfigure_through_82801(struct pci_dev *dev,
struct
smsc_ircc_subsystem_configuration
*conf)
@@ -2786,7 +2786,7 @@ static int __devinit preconfigure_through_82801(struct pci_dev *dev,
* This is based on reverse-engineering since ALi does not
* provide any data sheet for the 1533 chip.
*/
-static void __devinit preconfigure_ali_port(struct pci_dev *dev,
+static void __init preconfigure_ali_port(struct pci_dev *dev,
unsigned short port)
{
unsigned char reg;
@@ -2824,7 +2824,7 @@ static void __devinit preconfigure_ali_port(struct pci_dev *dev,
IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port);
}
-static int __devinit preconfigure_through_ali(struct pci_dev *dev,
+static int __init preconfigure_through_ali(struct pci_dev *dev,
struct
smsc_ircc_subsystem_configuration
*conf)
@@ -2837,7 +2837,7 @@ static int __devinit preconfigure_through_ali(struct pci_dev *dev,
return preconfigure_smsc_chip(conf);
}
-static int __devinit smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
+static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
unsigned short ircc_fir,
unsigned short ircc_sir,
unsigned char ircc_dma,
@@ -2849,7 +2849,7 @@ static int __devinit smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
int ret = 0;
for_each_pci_dev(dev) {
- const struct smsc_ircc_subsystem_configuration *conf;
+ struct smsc_ircc_subsystem_configuration *conf;
/*
* Cache the subsystem vendor/device:
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 4d40626..fc12ac0 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -661,7 +661,7 @@ static void ks8842_rx_frame(struct net_device *netdev,
/* check the status */
if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
- struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
+ struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3);
if (skb) {
diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c
index e8984b0c..243ed2a 100644
--- a/drivers/net/ne3210.c
+++ b/drivers/net/ne3210.c
@@ -80,20 +80,17 @@ static void ne3210_block_output(struct net_device *dev, int count, const unsigne
#define NE3210_DEBUG 0x0
-static const unsigned char irq_map[] __devinitconst =
- { 15, 12, 11, 10, 9, 7, 5, 3 };
-static const unsigned int shmem_map[] __devinitconst =
- { 0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0 };
-static const char *const ifmap[] __devinitconst =
- { "UTP", "?", "BNC", "AUI" };
-static const int ifmap_val[] __devinitconst = {
+static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
+static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0};
+static const char *ifmap[] __initdata = {"UTP", "?", "BNC", "AUI"};
+static int ifmap_val[] __initdata = {
IF_PORT_10BASET,
IF_PORT_UNKNOWN,
IF_PORT_10BASE2,
IF_PORT_AUI,
};
-static int __devinit ne3210_eisa_probe (struct device *device)
+static int __init ne3210_eisa_probe (struct device *device)
{
unsigned long ioaddr, phys_mem;
int i, retval, port_index;
@@ -316,7 +313,7 @@ static void ne3210_block_output(struct net_device *dev, int count,
memcpy_toio(shmem, buf, count);
}
-static const struct eisa_device_id ne3210_ids[] __devinitconst = {
+static struct eisa_device_id ne3210_ids[] = {
{ "EGL0101" },
{ "NVL1801" },
{ "" },
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 53872d7..a1b82c9 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -340,7 +340,7 @@ ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
}
/* May sleep, don't call from interrupt level or with interrupts disabled */
-static unsigned int
+static void
ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
char *cflags, int count)
{
@@ -348,7 +348,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
unsigned long flags;
if (!ap)
- return -ENODEV;
+ return;
spin_lock_irqsave(&ap->recv_lock, flags);
ppp_async_input(ap, buf, cflags, count);
spin_unlock_irqrestore(&ap->recv_lock, flags);
@@ -356,8 +356,6 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
tasklet_schedule(&ap->tsk);
ap_put(ap);
tty_unthrottle(tty);
-
- return count;
}
static void
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 0815790..2573f52 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -381,7 +381,7 @@ ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
}
/* May sleep, don't call from interrupt level or with interrupts disabled */
-static unsigned int
+static void
ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
char *cflags, int count)
{
@@ -389,7 +389,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
unsigned long flags;
if (!ap)
- return -ENODEV;
+ return;
spin_lock_irqsave(&ap->recv_lock, flags);
ppp_sync_input(ap, buf, cflags, count);
spin_unlock_irqrestore(&ap->recv_lock, flags);
@@ -397,8 +397,6 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
tasklet_schedule(&ap->tsk);
sp_put(ap);
tty_unthrottle(tty);
-
- return count;
}
static void
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 584809c..8ec1a9a 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -670,17 +670,16 @@ static void sl_setup(struct net_device *dev)
* in parallel
*/
-static unsigned int slip_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
struct slip *sl = tty->disc_data;
- int bytes = count;
if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
- return -ENODEV;
+ return;
/* Read the characters out of the buffer */
- while (bytes--) {
+ while (count--) {
if (fp && *fp++) {
if (!test_and_set_bit(SLF_ERROR, &sl->flags))
sl->dev->stats.rx_errors++;
@@ -694,8 +693,6 @@ static unsigned int slip_receive_buf(struct tty_struct *tty,
#endif
slip_unesc(sl, *cp++);
}
-
- return count;
}
/************************************
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c
index 0f29f26..d07c39c 100644
--- a/drivers/net/smc-mca.c
+++ b/drivers/net/smc-mca.c
@@ -156,7 +156,7 @@ static const struct {
{ 14, 15 }
};
-static const short smc_mca_adapter_ids[] __devinitconst = {
+static short smc_mca_adapter_ids[] __initdata = {
0x61c8,
0x61c9,
0x6fc0,
@@ -168,7 +168,7 @@ static const short smc_mca_adapter_ids[] __devinitconst = {
0x0000
};
-static const char *const smc_mca_adapter_names[] __devinitconst = {
+static char *smc_mca_adapter_names[] __initdata = {
"SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)",
"SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)",
"WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)",
@@ -199,7 +199,7 @@ static const struct net_device_ops ultramca_netdev_ops = {
#endif
};
-static int __devinit ultramca_probe(struct device *gen_dev)
+static int __init ultramca_probe(struct device *gen_dev)
{
unsigned short ioaddr;
struct net_device *dev;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f4b01c6..a1f9f9e 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -5774,7 +5774,7 @@ static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
dma_unmap_addr(txb, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
- for (i = 0; i <= last; i++) {
+ for (i = 0; i < last; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
entry = NEXT_TX(entry);
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 1313aa1..2bedc0a 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -727,7 +727,7 @@ static int __devexit madgemc_remove(struct device *device)
return 0;
}
-static const short madgemc_adapter_ids[] __devinitconst = {
+static short madgemc_adapter_ids[] __initdata = {
0x002d,
0x0000
};
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 45144d5..efaa1d6 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1995,7 +1995,7 @@ SetMulticastFilter(struct net_device *dev)
static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
-static int __devinit de4x5_eisa_probe (struct device *gendev)
+static int __init de4x5_eisa_probe (struct device *gendev)
{
struct eisa_device *edev;
u_long iobase;
@@ -2097,7 +2097,7 @@ static int __devexit de4x5_eisa_remove (struct device *device)
return 0;
}
-static const struct eisa_device_id de4x5_eisa_ids[] __devinitconst = {
+static struct eisa_device_id de4x5_eisa_ids[] = {
{ "DEC4250", 0 }, /* 0 is the board name index... */
{ "" }
};
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index d7221c4..8056f8a 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -495,7 +495,7 @@ static void catc_ctrl_run(struct catc *catc)
if (!q->dir && q->buf && q->len)
memcpy(catc->ctrl_buf, q->buf, q->len);
- if ((status = usb_submit_urb(catc->ctrl_urb, GFP_KERNEL)))
+ if ((status = usb_submit_urb(catc->ctrl_urb, GFP_ATOMIC)))
err("submit(ctrl_urb) status %d", status);
}
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index cdd3ae4..f33ca6a 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -54,7 +54,7 @@
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc.h>
-#define DRIVER_VERSION "24-May-2011"
+#define DRIVER_VERSION "01-June-2011"
/* CDC NCM subclass 3.2.1 */
#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
@@ -1234,6 +1234,7 @@ static struct usb_driver cdc_ncm_driver = {
.disconnect = cdc_ncm_disconnect,
.suspend = usbnet_suspend,
.resume = usbnet_resume,
+ .reset_resume = usbnet_resume,
.supports_autosuspend = 1,
};
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 40398bf..24297b2 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -517,18 +517,17 @@ static int x25_asy_close(struct net_device *dev)
* and sent on to some IP layer for further processing.
*/
-static unsigned int x25_asy_receive_buf(struct tty_struct *tty,
+static void x25_asy_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct x25_asy *sl = tty->disc_data;
- int bytes = count;
if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
return;
/* Read the characters out of the buffer */
- while (bytes--) {
+ while (count--) {
if (fp && *fp++) {
if (!test_and_set_bit(SLF_ERROR, &sl->flags))
sl->dev->stats.rx_errors++;
@@ -537,8 +536,6 @@ static unsigned int x25_asy_receive_buf(struct tty_struct *tty,
}
x25_asy_unesc(sl, *cp++);
}
-
- return count;
}
/*
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index d9ff841..d9c08c6 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -26,7 +26,6 @@ config ATH9K
config ATH9K_PCI
bool "Atheros ath9k PCI/PCIe bus support"
depends on ATH9K && PCI
- default PCI
---help---
This option enables the PCI bus support in ath9k.
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 015d974..2d4c091 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -829,7 +829,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
if (AR_SREV_9271(ah)) {
if (!ar9285_hw_cl_cal(ah, chan))
return false;
- } else if (AR_SREV_9285_12_OR_LATER(ah)) {
+ } else if (AR_SREV_9285(ah) && AR_SREV_9285_12_OR_LATER(ah)) {
if (!ar9285_hw_clc(ah, chan))
return false;
} else {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 0ca7635..ff8150e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -4645,10 +4645,16 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
case 1:
break;
case 2:
- scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+ if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
+ scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+ else
+ scaledPower = 0;
break;
case 3:
- scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+ if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
+ scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+ else
+ scaledPower = 0;
break;
}
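Both power-table hunks (here and in eeprom_9287.c below) guard an unsigned subtraction: scaledPower is a u16, so subtracting the per-chain reduction without a check would wrap to a huge value rather than going negative. A standalone sketch of the hazard and the clamp follows; the constant used is illustrative, not the driver's value.

/* Standalone sketch of the underflow guard: on an unsigned 16-bit value,
 * 3 - 6 wraps to 65533 instead of going negative, so the subtraction is
 * clamped at zero.
 */
#include <stdio.h>
#include <stdint.h>

#define REDUCE_BY_TWO_CHAIN 6	/* illustrative value only */

static uint16_t clamp_power(uint16_t scaled_power, uint16_t reduction)
{
	if (scaled_power > reduction)
		return scaled_power - reduction;
	return 0;			/* clamp instead of wrapping */
}

int main(void)
{
	uint16_t low = 3;

	printf("unguarded: %u\n", (uint16_t)(low - REDUCE_BY_TWO_CHAIN));
	printf("guarded:   %u\n", clamp_power(low, REDUCE_BY_TWO_CHAIN));
	return 0;
}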
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index eee23ec..892c48b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1381,3 +1381,25 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
"==== BB update: done ====\n\n");
}
EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
+
+void ar9003_hw_disable_phy_restart(struct ath_hw *ah)
+{
+ u32 val;
+
+ /* While receiving an unsupported rate frame, the rx state machine
+ * gets into state 0xb, and if a phy_restart happens in that
+ * state the BB hangs. If the RXSM is in state 0xb after the
+ * first BB panic, make sure phy_restart stays disabled.
+ */
+ if (!((MS(ah->bb_watchdog_last_status,
+ AR_PHY_WATCHDOG_RX_OFDM_SM) == 0xb) ||
+ ah->bb_hang_rx_ofdm))
+ return;
+
+ ah->bb_hang_rx_ofdm = true;
+ val = REG_READ(ah, AR_PHY_RESTART);
+ val &= ~AR_PHY_RESTART_ENA;
+
+ REG_WRITE(ah, AR_PHY_RESTART, val);
+}
+EXPORT_SYMBOL(ar9003_hw_disable_phy_restart);
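The new helper clears the restart-enable bit with a plain read-modify-write of the register. A tiny standalone sketch of that pattern follows; reg_read()/reg_write() and the bit value are stand-ins for REG_READ()/REG_WRITE() and the real AR_PHY_RESTART_ENA definition.

/* Standalone sketch of a read-modify-write that clears one enable bit.
 * A plain variable stands in for the memory-mapped register.
 */
#include <stdio.h>
#include <stdint.h>

#define RESTART_ENA (1u << 0)		/* illustrative bit position */

static uint32_t fake_reg = 0xdeadbeef;	/* bit 0 happens to be set */

static uint32_t reg_read(void)      { return fake_reg; }
static void reg_write(uint32_t v)   { fake_reg = v; }

static void disable_restart(void)
{
	uint32_t val = reg_read();	/* read current register value  */
	val &= ~RESTART_ENA;		/* clear only the enable bit    */
	reg_write(val);			/* write the modified value back */
}

int main(void)
{
	printf("before: 0x%08x\n", fake_reg);
	disable_restart();
	printf("after:  0x%08x\n", fake_reg);
	return 0;
}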
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 7856f0d..343fc9f 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -524,10 +524,16 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
case 1:
break;
case 2:
- scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+ if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
+ scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+ else
+ scaledPower = 0;
break;
case 3:
- scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+ if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
+ scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+ else
+ scaledPower = 0;
break;
}
scaledPower = max((u16)0, scaledPower);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 72543ce..1be7c8b 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1555,9 +1555,12 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ah->btcoex_hw.enabled)
ath9k_hw_btcoex_enable(ah);
- if (AR_SREV_9300_20_OR_LATER(ah))
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
ar9003_hw_bb_watchdog_config(ah);
+ ar9003_hw_disable_phy_restart(ah);
+ }
+
ath9k_hw_apply_gpio_override(ah);
return 0;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 57435ce..4b157c5 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -842,6 +842,7 @@ struct ath_hw {
u32 bb_watchdog_last_status;
u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */
+ u8 bb_hang_rx_ofdm; /* true if bb hang due to rx_ofdm */
unsigned int paprd_target_power;
unsigned int paprd_training_power;
@@ -990,6 +991,7 @@ void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah);
void ar9003_hw_bb_watchdog_config(struct ath_hw *ah);
void ar9003_hw_bb_watchdog_read(struct ath_hw *ah);
void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah);
+void ar9003_hw_disable_phy_restart(struct ath_hw *ah);
void ar9003_paprd_enable(struct ath_hw *ah, bool val);
void ar9003_paprd_populate_single_table(struct ath_hw *ah,
struct ath9k_hw_cal_data *caldata,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a198ee3..2ca351f 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -670,7 +670,8 @@ void ath9k_tasklet(unsigned long data)
u32 status = sc->intrstatus;
u32 rxmask;
- if (status & ATH9K_INT_FATAL) {
+ if ((status & ATH9K_INT_FATAL) ||
+ (status & ATH9K_INT_BB_WATCHDOG)) {
ath_reset(sc, true);
return;
}
@@ -737,6 +738,7 @@ irqreturn_t ath_isr(int irq, void *dev)
{
#define SCHED_INTR ( \
ATH9K_INT_FATAL | \
+ ATH9K_INT_BB_WATCHDOG | \
ATH9K_INT_RXORN | \
ATH9K_INT_RXEOL | \
ATH9K_INT_RX | \
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 1754221..ba7f36a 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -689,7 +689,8 @@ static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
if (WLAN_RC_PHY_HT(rate_table->info[rix].phy)) {
rate->flags |= IEEE80211_TX_RC_MCS;
- if (WLAN_RC_PHY_40(rate_table->info[rix].phy))
+ if (WLAN_RC_PHY_40(rate_table->info[rix].phy) &&
+ conf_is_ht40(&txrc->hw->conf))
rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
if (WLAN_RC_PHY_SGI(rate_table->info[rix].phy))
rate->flags |= IEEE80211_TX_RC_SHORT_GI;
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 9ed6515..05960dd 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -3093,7 +3093,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
int freq;
bool avoid = false;
u8 length;
- u16 tmp, core, type, count, max, numb, last, cmd;
+ u16 tmp, core, type, count, max, numb, last = 0, cmd;
const u16 *table;
bool phy6or5x;
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
index 7e5e85a..a7a4739 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
@@ -628,11 +628,11 @@ void iwl4965_rx_reply_rx(struct iwl_priv *priv,
/* rx_status carries information about the packet to mac80211 */
rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+ rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
rx_status.freq =
ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
rx_status.band);
- rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
rx_status.rate_idx =
iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
rx_status.flag = 0;
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
index f5433c7..f9db25b 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -1543,7 +1543,7 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
s32 temp;
temp = iwl4965_hw_get_temperature(priv);
- if (temp < 0)
+ if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
return;
if (priv->temperature != temp) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index f8c710d..fda6fe0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -603,19 +603,27 @@ struct iwl_cfg iwl6050_2abg_cfg = {
IWL_DEVICE_6050,
};
+#define IWL_DEVICE_6150 \
+ .fw_name_pre = IWL6050_FW_PRE, \
+ .ucode_api_max = IWL6050_UCODE_API_MAX, \
+ .ucode_api_min = IWL6050_UCODE_API_MIN, \
+ .ops = &iwl6150_ops, \
+ .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
+ .base_params = &iwl6050_base_params, \
+ .need_dc_calib = true, \
+ .led_mode = IWL_LED_BLINK, \
+ .internal_wimax_coex = true
+
struct iwl_cfg iwl6150_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
- .fw_name_pre = IWL6050_FW_PRE,
- .ucode_api_max = IWL6050_UCODE_API_MAX,
- .ucode_api_min = IWL6050_UCODE_API_MIN,
- .eeprom_ver = EEPROM_6150_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,
- .ops = &iwl6150_ops,
- .base_params = &iwl6050_base_params,
+ IWL_DEVICE_6150,
.ht_params = &iwl6000_ht_params,
- .need_dc_calib = true,
- .led_mode = IWL_LED_RF_STATE,
- .internal_wimax_coex = true,
+};
+
+struct iwl_cfg iwl6150_bg_cfg = {
+ .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
+ IWL_DEVICE_6150,
};
struct iwl_cfg iwl6000_3agn_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 11c6c11..a662adc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -3831,11 +3831,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
/* 6150 WiFi/WiMax Series */
{IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0885, 0x1306, iwl6150_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)},
{IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0885, 0x1326, iwl6150_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)},
{IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0886, 0x1316, iwl6150_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)},
/* 1000 Series WiFi */
{IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 2495fe7..d171684 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -89,6 +89,7 @@ extern struct iwl_cfg iwl6000_3agn_cfg;
extern struct iwl_cfg iwl6050_2agn_cfg;
extern struct iwl_cfg iwl6050_2abg_cfg;
extern struct iwl_cfg iwl6150_bgn_cfg;
+extern struct iwl_cfg iwl6150_bg_cfg;
extern struct iwl_cfg iwl1000_bgn_cfg;
extern struct iwl_cfg iwl1000_bg_cfg;
extern struct iwl_cfg iwl100_bgn_cfg;
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 84566db..71c8f3f 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -994,6 +994,8 @@ static void lbs_submit_command(struct lbs_private *priv,
cmd = cmdnode->cmdbuf;
spin_lock_irqsave(&priv->driver_lock, flags);
+ priv->seqnum++;
+ cmd->seqnum = cpu_to_le16(priv->seqnum);
priv->cur_cmd = cmdnode;
spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -1621,11 +1623,9 @@ struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
/* Copy the incoming command to the buffer */
memcpy(cmdnode->cmdbuf, in_cmd, in_cmd_size);
- /* Set sequence number, clean result, move to buffer */
- priv->seqnum++;
+ /* Set command, clean result, move to buffer */
cmdnode->cmdbuf->command = cpu_to_le16(command);
cmdnode->cmdbuf->size = cpu_to_le16(in_cmd_size);
- cmdnode->cmdbuf->seqnum = cpu_to_le16(priv->seqnum);
cmdnode->cmdbuf->result = 0;
lbs_deb_host("PREP_CMD: command 0x%04x\n", command);
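The libertas change moves the sequence-number assignment into lbs_submit_command(), under the same driver_lock that publishes the command, so concurrent submitters cannot end up with out-of-order sequence numbers on the wire. A standalone pthread-based sketch of that idea follows; it illustrates the locking pattern only and is not the driver's code.

/* Standalone sketch: number the command and publish it under one lock,
 * so the numbering order always matches the submission order.
 */
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t driver_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int seqnum;		/* shared counter */
static unsigned int current_cmd_seq;	/* stand-in for priv->cur_cmd */

static void submit_command(void)
{
	pthread_mutex_lock(&driver_lock);
	seqnum++;			/* number the command...        */
	current_cmd_seq = seqnum;	/* ...and publish it atomically */
	pthread_mutex_unlock(&driver_lock);
}

static void *worker(void *arg)
{
	int i;

	(void)arg;
	for (i = 0; i < 1000; i++)
		submit_command();
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("last submitted seqnum: %u\n", current_cmd_seq);
	return 0;
}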
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index a0e9bc5..4e97e90 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -167,8 +167,8 @@
/* Rx unit register */
#define CARD_RX_UNIT_REG 0x63
-/* Event header Len*/
-#define MWIFIEX_EVENT_HEADER_LEN 8
+/* Event header len w/o 4 bytes of interface header */
+#define MWIFIEX_EVENT_HEADER_LEN 4
/* Max retry number of CMD53 write */
#define MAX_WRITE_IOMEM_RETRY 2
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 9def1e5..b2f8b8f 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -166,7 +166,6 @@ config RT2800USB_RT35XX
config RT2800USB_RT53XX
bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
depends on EXPERIMENTAL
- default y
---help---
This adds support for rt53xx wireless chipset family to the
rt2800pci driver.
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index a409528..89100e7 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -669,11 +669,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
&rx_status,
(u8 *) pdesc, skb);
- pci_unmap_single(rtlpci->pdev,
- *((dma_addr_t *) skb->cb),
- rtlpci->rxbuffersize,
- PCI_DMA_FROMDEVICE);
-
skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
false,
HW_DESC_RXPKT_LEN));
@@ -690,6 +685,21 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
hdr = rtl_get_hdr(skb);
fc = rtl_get_fc(skb);
+ /* try for new buffer - if allocation fails, drop
+ * frame and reuse old buffer
+ */
+ new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+ if (unlikely(!new_skb)) {
+ RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
+ DBG_DMESG,
+ ("can't alloc skb for rx\n"));
+ goto done;
+ }
+ pci_unmap_single(rtlpci->pdev,
+ *((dma_addr_t *) skb->cb),
+ rtlpci->rxbuffersize,
+ PCI_DMA_FROMDEVICE);
+
if (!stats.crc || !stats.hwerror) {
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
sizeof(rx_status));
@@ -758,15 +768,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
rtl_lps_leave(hw);
}
- new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
- if (unlikely(!new_skb)) {
- RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
- DBG_DMESG,
- ("can't alloc skb for rx\n"));
- goto done;
- }
skb = new_skb;
- /*skb->dev = dev; */
rtlpci->rx_ring[rx_queue_idx].rx_buf[rtlpci->
rx_ring
@@ -1113,6 +1115,13 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
rtlpci->rx_ring[rx_queue_idx].idx = 0;
+ /* If amsdu_8k is disabled, set buffersize to 4096. This
+ * change will reduce memory fragmentation.
+ */
+ if (rtlpci->rxbuffersize > 4096 &&
+ rtlpriv->rtlhal.disable_amsdu_8k)
+ rtlpci->rxbuffersize = 4096;
+
for (i = 0; i < rtlpci->rxringcount; i++) {
struct sk_buff *skb =
dev_alloc_skb(rtlpci->rxbuffersize);
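The rx-interrupt rework above allocates the replacement skb before the old one is unmapped and handed up the stack, so an allocation failure only drops the frame and never leaves the ring short a buffer. A standalone sketch of that allocate-before-consume pattern follows, using malloc/free as stand-ins for the skb and DMA handling.

/* Standalone sketch: allocate the replacement first; only then consume
 * the old buffer. On allocation failure the ring keeps its buffer.
 */
#include <stdio.h>
#include <stdlib.h>

#define BUF_SIZE 64

static char *ring_slot;			/* one-slot "rx ring" */

static int receive_one(int simulate_oom)
{
	char *new_buf = simulate_oom ? NULL : malloc(BUF_SIZE);

	if (!new_buf)
		return -1;		/* drop frame, reuse old buffer later */

	/* safe to consume the old buffer now that a replacement exists */
	free(ring_slot);		/* stands in for passing it up the stack */
	ring_slot = new_buf;
	return 0;
}

int main(void)
{
	ring_slot = malloc(BUF_SIZE);

	printf("normal rx: %d\n", receive_one(0));	/* buffer swapped */
	printf("oom rx:    %d\n", receive_one(1));	/* frame dropped  */
	printf("ring still has a buffer: %s\n", ring_slot ? "yes" : "no");
	free(ring_slot);
	return 0;
}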
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index 1ab6c86..c83fefb 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -1157,6 +1157,9 @@ struct conf_sched_scan_settings {
/* time to wait on the channel for passive scans (in TUs) */
u32 dwell_time_passive;
+ /* time to wait on the channel for DFS scans (in TUs) */
+ u32 dwell_time_dfs;
+
/* number of probe requests to send on each channel in active scans */
u8 num_probe_reqs;
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index bc00e52..e6497dc 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -311,6 +311,7 @@ static struct conf_drv_settings default_conf = {
.min_dwell_time_active = 8,
.max_dwell_time_active = 30,
.dwell_time_passive = 100,
+ .dwell_time_dfs = 150,
.num_probe_reqs = 2,
.rssi_threshold = -90,
.snr_threshold = 0,
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index f37e5a3..56f76ab 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -331,16 +331,22 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
int i, j;
u32 flags;
+ bool force_passive = !req->n_ssids;
for (i = 0, j = start;
i < req->n_channels && j < MAX_CHANNELS_ALL_BANDS;
i++) {
flags = req->channels[i]->flags;
- if (!(flags & IEEE80211_CHAN_DISABLED) &&
- ((flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive) &&
- ((flags & IEEE80211_CHAN_RADAR) == radar) &&
- (req->channels[i]->band == band)) {
+ if (force_passive)
+ flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+ if ((req->channels[i]->band == band) &&
+ !(flags & IEEE80211_CHAN_DISABLED) &&
+ (!!(flags & IEEE80211_CHAN_RADAR) == radar) &&
+ /* if radar is set, we ignore the passive flag */
+ (radar ||
+ !!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
req->channels[i]->band,
req->channels[i]->center_freq);
@@ -350,7 +356,12 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
wl1271_debug(DEBUG_SCAN, "max_power %d",
req->channels[i]->max_power);
- if (flags & IEEE80211_CHAN_PASSIVE_SCAN) {
+ if (flags & IEEE80211_CHAN_RADAR) {
+ channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
+ channels[j].passive_duration =
+ cpu_to_le16(c->dwell_time_dfs);
+ }
+ else if (flags & IEEE80211_CHAN_PASSIVE_SCAN) {
channels[j].passive_duration =
cpu_to_le16(c->dwell_time_passive);
} else {
@@ -359,7 +370,7 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
channels[j].max_duration =
cpu_to_le16(c->max_dwell_time_active);
}
- channels[j].tx_power_att = req->channels[j]->max_power;
+ channels[j].tx_power_att = req->channels[i]->max_power;
channels[j].channel = req->channels[i]->hw_value;
j++;
@@ -386,7 +397,11 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
IEEE80211_BAND_2GHZ,
false, false, idx);
- idx += cfg->active[0];
+ /*
+ * 5GHz channels always start at position 14, not immediately
+ * after the last 2.4GHz channel
+ */
+ idx = 14;
cfg->passive[1] =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
@@ -394,22 +409,23 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
false, true, idx);
idx += cfg->passive[1];
- cfg->active[1] =
+ cfg->dfs =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
IEEE80211_BAND_5GHZ,
- false, false, 14);
- idx += cfg->active[1];
+ true, true, idx);
+ idx += cfg->dfs;
- cfg->dfs =
+ cfg->active[1] =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
IEEE80211_BAND_5GHZ,
- true, false, idx);
- idx += cfg->dfs;
+ false, false, idx);
+ idx += cfg->active[1];
wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d",
cfg->active[0], cfg->passive[0]);
wl1271_debug(DEBUG_SCAN, " 5GHz: active %d passive %d",
cfg->active[1], cfg->passive[1]);
+ wl1271_debug(DEBUG_SCAN, " DFS: %d", cfg->dfs);
return idx;
}
@@ -421,6 +437,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
struct wl1271_cmd_sched_scan_config *cfg = NULL;
struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
int i, total_channels, ret;
+ bool force_passive = !req->n_ssids;
wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
@@ -444,7 +461,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++)
cfg->intervals[i] = cpu_to_le32(req->interval);
- if (req->ssids[0].ssid_len && req->ssids[0].ssid) {
+ if (!force_passive && req->ssids[0].ssid_len && req->ssids[0].ssid) {
cfg->filter_type = SCAN_SSID_FILTER_SPECIFIC;
cfg->ssid_len = req->ssids[0].ssid_len;
memcpy(cfg->ssid, req->ssids[0].ssid,
@@ -461,7 +478,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
goto out;
}
- if (cfg->active[0]) {
+ if (!force_passive && cfg->active[0]) {
ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[IEEE80211_BAND_2GHZ],
@@ -473,7 +490,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
}
}
- if (cfg->active[1]) {
+ if (!force_passive && cfg->active[1]) {
ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[IEEE80211_BAND_5GHZ],
diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/wl12xx/scan.h
index c833195..a0b6c5d 100644
--- a/drivers/net/wireless/wl12xx/scan.h
+++ b/drivers/net/wireless/wl12xx/scan.h
@@ -137,6 +137,9 @@ enum {
SCAN_BSS_TYPE_ANY,
};
+#define SCAN_CHANNEL_FLAGS_DFS BIT(0)
+#define SCAN_CHANNEL_FLAGS_DFS_ENABLED BIT(1)
+
struct conn_scan_ch_params {
__le16 min_duration;
__le16 max_duration;
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 0e81994..631194d 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1533,6 +1533,31 @@ static void __exit usb_exit(void)
module_init(usb_init);
module_exit(usb_exit);
+static int zd_ep_regs_out_msg(struct usb_device *udev, void *data, int len,
+ int *actual_length, int timeout)
+{
+ /* In USB 2.0 mode the EP_REGS_OUT endpoint is interrupt type, but in
+ * USB 1.1 mode it is bulk. Select the correct URB type from the
+ * endpoint descriptor.
+ */
+ struct usb_host_endpoint *ep;
+ unsigned int pipe;
+
+ pipe = usb_sndintpipe(udev, EP_REGS_OUT);
+ ep = usb_pipe_endpoint(udev, pipe);
+ if (!ep)
+ return -EINVAL;
+
+ if (usb_endpoint_xfer_int(&ep->desc)) {
+ return usb_interrupt_msg(udev, pipe, data, len,
+ actual_length, timeout);
+ } else {
+ pipe = usb_sndbulkpipe(udev, EP_REGS_OUT);
+ return usb_bulk_msg(udev, pipe, data, len, actual_length,
+ timeout);
+ }
+}
+
static int usb_int_regs_length(unsigned int count)
{
return sizeof(struct usb_int_regs) + count * sizeof(struct reg_data);
@@ -1648,15 +1673,14 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
udev = zd_usb_to_usbdev(usb);
prepare_read_regs_int(usb);
- r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
- req, req_len, &actual_req_len, 50 /* ms */);
+ r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
- "error in usb_interrupt_msg(). Error number %d\n", r);
+ "error in zd_ep_regs_out_msg(). Error number %d\n", r);
goto error;
}
if (req_len != actual_req_len) {
- dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()\n"
+ dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()\n"
" req_len %d != actual_req_len %d\n",
req_len, actual_req_len);
r = -EIO;
@@ -1818,9 +1842,17 @@ int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
rw->value = cpu_to_le16(ioreqs[i].value);
}
- usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
- req, req_len, iowrite16v_urb_complete, usb,
- ep->desc.bInterval);
+ /* In USB 2.0 mode the endpoint is interrupt type, but in USB 1.1 mode
+ * it is bulk. Select the correct URB type from the endpoint descriptor.
+ */
+ if (usb_endpoint_xfer_int(&ep->desc))
+ usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
+ req, req_len, iowrite16v_urb_complete, usb,
+ ep->desc.bInterval);
+ else
+ usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
+ req, req_len, iowrite16v_urb_complete, usb);
+
urb->transfer_flags |= URB_FREE_BUFFER;
/* Submit previous URB */
@@ -1924,15 +1956,14 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
}
udev = zd_usb_to_usbdev(usb);
- r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
- req, req_len, &actual_req_len, 50 /* ms */);
+ r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
- "error in usb_interrupt_msg(). Error number %d\n", r);
+ "error in zd_ep_regs_out_msg(). Error number %d\n", r);
goto out;
}
if (req_len != actual_req_len) {
- dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()"
+ dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()"
" req_len %d != actual_req_len %d\n",
req_len, actual_req_len);
r = -EIO;
diff --git a/drivers/rtc/rtc-m41t93.c b/drivers/rtc/rtc-m41t93.c
index 1a84b3e..7317d3b 100644
--- a/drivers/rtc/rtc-m41t93.c
+++ b/drivers/rtc/rtc-m41t93.c
@@ -189,7 +189,7 @@ static int __devinit m41t93_probe(struct spi_device *spi)
static int __devexit m41t93_remove(struct spi_device *spi)
{
- struct rtc_device *rtc = platform_get_drvdata(spi);
+ struct rtc_device *rtc = spi_get_drvdata(spi);
if (rtc)
rtc_device_unregister(rtc);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 58584dc..44e8ca3 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -297,7 +297,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
kfree(sdev);
goto out;
}
-
+ blk_get_queue(sdev->request_queue);
sdev->request_queue->queuedata = sdev;
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index e639125..e0bd3f7 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -322,6 +322,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
kfree(evt);
}
+ blk_put_queue(sdev->request_queue);
/* NULL queue means the device can't be used */
sdev->request_queue = NULL;
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index d357007..1cf6454 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -1,7 +1,7 @@
/*
* Driver for Cirrus Logic EP93xx SPI controller.
*
- * Copyright (c) 2010 Mika Westerberg
+ * Copyright (C) 2010-2011 Mika Westerberg
*
* Explicit FIFO handling code was inspired by amba-pl022 driver.
*
@@ -21,13 +21,16 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
+#include <linux/scatterlist.h>
#include <linux/spi/spi.h>
+#include <mach/dma.h>
#include <mach/ep93xx_spi.h>
#define SSPCR0 0x0000
@@ -71,6 +74,7 @@
* @pdev: pointer to platform device
* @clk: clock for the controller
* @regs_base: pointer to ioremap()'d registers
+ * @sspdr_phys: physical address of the SSPDR register
* @irq: IRQ number used by the driver
* @min_rate: minimum clock rate (in Hz) supported by the controller
* @max_rate: maximum clock rate (in Hz) supported by the controller
@@ -84,6 +88,14 @@
* @rx: current byte in transfer to receive
* @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
* frame decreases this level and sending one frame increases it.
+ * @dma_rx: RX DMA channel
+ * @dma_tx: TX DMA channel
+ * @dma_rx_data: RX parameters passed to the DMA engine
+ * @dma_tx_data: TX parameters passed to the DMA engine
+ * @rx_sgt: sg table for RX transfers
+ * @tx_sgt: sg table for TX transfers
+ * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
+ * the client
*
* This structure holds EP93xx SPI controller specific information. When
* @running is %true, driver accepts transfer requests from protocol drivers.
@@ -100,6 +112,7 @@ struct ep93xx_spi {
const struct platform_device *pdev;
struct clk *clk;
void __iomem *regs_base;
+ unsigned long sspdr_phys;
int irq;
unsigned long min_rate;
unsigned long max_rate;
@@ -112,6 +125,13 @@ struct ep93xx_spi {
size_t tx;
size_t rx;
size_t fifo_level;
+ struct dma_chan *dma_rx;
+ struct dma_chan *dma_tx;
+ struct ep93xx_dma_data dma_rx_data;
+ struct ep93xx_dma_data dma_tx_data;
+ struct sg_table rx_sgt;
+ struct sg_table tx_sgt;
+ void *zeropage;
};
/**
@@ -496,14 +516,195 @@ static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
espi->fifo_level++;
}
- if (espi->rx == t->len) {
- msg->actual_length += t->len;
+ if (espi->rx == t->len)
return 0;
- }
return -EINPROGRESS;
}
+static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
+{
+ /*
+ * Now everything is set up for the current transfer. We prime the TX
+ * FIFO, enable interrupts, and wait for the transfer to complete.
+ */
+ if (ep93xx_spi_read_write(espi)) {
+ ep93xx_spi_enable_interrupts(espi);
+ wait_for_completion(&espi->wait);
+ }
+}
+
+/**
+ * ep93xx_spi_dma_prepare() - prepares a DMA transfer
+ * @espi: ep93xx SPI controller struct
+ * @dir: DMA transfer direction
+ *
+ * Configures the DMA, maps the buffer and prepares the DMA descriptor.
+ * Returns a valid DMA descriptor on success and an ERR_PTR() on failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
+{
+ struct spi_transfer *t = espi->current_msg->state;
+ struct dma_async_tx_descriptor *txd;
+ enum dma_slave_buswidth buswidth;
+ struct dma_slave_config conf;
+ struct scatterlist *sg;
+ struct sg_table *sgt;
+ struct dma_chan *chan;
+ const void *buf, *pbuf;
+ size_t len = t->len;
+ int i, ret, nents;
+
+ if (bits_per_word(espi) > 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+ memset(&conf, 0, sizeof(conf));
+ conf.direction = dir;
+
+ if (dir == DMA_FROM_DEVICE) {
+ chan = espi->dma_rx;
+ buf = t->rx_buf;
+ sgt = &espi->rx_sgt;
+
+ conf.src_addr = espi->sspdr_phys;
+ conf.src_addr_width = buswidth;
+ } else {
+ chan = espi->dma_tx;
+ buf = t->tx_buf;
+ sgt = &espi->tx_sgt;
+
+ conf.dst_addr = espi->sspdr_phys;
+ conf.dst_addr_width = buswidth;
+ }
+
+ ret = dmaengine_slave_config(chan, &conf);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+ * We need to split the transfer into PAGE_SIZE'd chunks. This is
+ * because we are using @espi->zeropage to provide a zero RX buffer
+ * for the TX transfers and we have only allocated one page for that.
+ *
+ * For performance reasons we allocate a new sg_table only when
+ * needed. Otherwise we will re-use the current one. Eventually the
+ * last sg_table is released in ep93xx_spi_release_dma().
+ */
+
+ nents = DIV_ROUND_UP(len, PAGE_SIZE);
+ if (nents != sgt->nents) {
+ sg_free_table(sgt);
+
+ ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ pbuf = buf;
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes = min_t(size_t, len, PAGE_SIZE);
+
+ if (buf) {
+ sg_set_page(sg, virt_to_page(pbuf), bytes,
+ offset_in_page(pbuf));
+ } else {
+ sg_set_page(sg, virt_to_page(espi->zeropage),
+ bytes, 0);
+ }
+
+ pbuf += bytes;
+ len -= bytes;
+ }
+
+ if (WARN_ON(len)) {
+ dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
+ return ERR_PTR(-EINVAL);
+ }
+
+ nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+ if (!nents)
+ return ERR_PTR(-ENOMEM);
+
+ txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
+ dir, DMA_CTRL_ACK);
+ if (!txd) {
+ dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+ return ERR_PTR(-ENOMEM);
+ }
+ return txd;
+}
+
+/**
+ * ep93xx_spi_dma_finish() - finishes a DMA transfer
+ * @espi: ep93xx SPI controller struct
+ * @dir: DMA transfer direction
+ *
+ * Finishes the DMA transfer. After this, the DMA buffer is unmapped.
+ */
+static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
+ enum dma_data_direction dir)
+{
+ struct dma_chan *chan;
+ struct sg_table *sgt;
+
+ if (dir == DMA_FROM_DEVICE) {
+ chan = espi->dma_rx;
+ sgt = &espi->rx_sgt;
+ } else {
+ chan = espi->dma_tx;
+ sgt = &espi->tx_sgt;
+ }
+
+ dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+}
+
+static void ep93xx_spi_dma_callback(void *callback_param)
+{
+ complete(callback_param);
+}
+
+static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
+{
+ struct spi_message *msg = espi->current_msg;
+ struct dma_async_tx_descriptor *rxd, *txd;
+
+ rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE);
+ if (IS_ERR(rxd)) {
+ dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
+ msg->status = PTR_ERR(rxd);
+ return;
+ }
+
+ txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE);
+ if (IS_ERR(txd)) {
+ ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
+ dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
+ msg->status = PTR_ERR(txd);
+ return;
+ }
+
+ /* We are ready when RX is done */
+ rxd->callback = ep93xx_spi_dma_callback;
+ rxd->callback_param = &espi->wait;
+
+ /* Now submit both descriptors and wait while they finish */
+ dmaengine_submit(rxd);
+ dmaengine_submit(txd);
+
+ dma_async_issue_pending(espi->dma_rx);
+ dma_async_issue_pending(espi->dma_tx);
+
+ wait_for_completion(&espi->wait);
+
+ ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE);
+ ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
+}
+
/**
* ep93xx_spi_process_transfer() - processes one SPI transfer
* @espi: ep93xx SPI controller struct
@@ -556,13 +757,14 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
espi->tx = 0;
/*
- * Now everything is set up for the current transfer. We prime the TX
- * FIFO, enable interrupts, and wait for the transfer to complete.
+ * There is no point in setting up DMA for transfers which fit into the
+ * FIFO and can be completed with a single interrupt, so in these cases
+ * we use PIO and don't bother with DMA.
*/
- if (ep93xx_spi_read_write(espi)) {
- ep93xx_spi_enable_interrupts(espi);
- wait_for_completion(&espi->wait);
- }
+ if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
+ ep93xx_spi_dma_transfer(espi);
+ else
+ ep93xx_spi_pio_transfer(espi);
/*
* In case of error during transmit, we bail out from processing
@@ -571,6 +773,8 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
if (msg->status)
return;
+ msg->actual_length += t->len;
+
/*
* After this transfer is finished, perform any possible
* post-transfer actions requested by the protocol driver.
@@ -752,6 +956,75 @@ static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
+{
+ if (ep93xx_dma_chan_is_m2p(chan))
+ return false;
+
+ chan->private = filter_param;
+ return true;
+}
+
+static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
+{
+ dma_cap_mask_t mask;
+ int ret;
+
+ espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!espi->zeropage)
+ return -ENOMEM;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ espi->dma_rx_data.port = EP93XX_DMA_SSP;
+ espi->dma_rx_data.direction = DMA_FROM_DEVICE;
+ espi->dma_rx_data.name = "ep93xx-spi-rx";
+
+ espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
+ &espi->dma_rx_data);
+ if (!espi->dma_rx) {
+ ret = -ENODEV;
+ goto fail_free_page;
+ }
+
+ espi->dma_tx_data.port = EP93XX_DMA_SSP;
+ espi->dma_tx_data.direction = DMA_TO_DEVICE;
+ espi->dma_tx_data.name = "ep93xx-spi-tx";
+
+ espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
+ &espi->dma_tx_data);
+ if (!espi->dma_tx) {
+ ret = -ENODEV;
+ goto fail_release_rx;
+ }
+
+ return 0;
+
+fail_release_rx:
+ dma_release_channel(espi->dma_rx);
+ espi->dma_rx = NULL;
+fail_free_page:
+ free_page((unsigned long)espi->zeropage);
+
+ return ret;
+}
+
+static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
+{
+ if (espi->dma_rx) {
+ dma_release_channel(espi->dma_rx);
+ sg_free_table(&espi->rx_sgt);
+ }
+ if (espi->dma_tx) {
+ dma_release_channel(espi->dma_tx);
+ sg_free_table(&espi->tx_sgt);
+ }
+
+ if (espi->zeropage)
+ free_page((unsigned long)espi->zeropage);
+}
+
static int __init ep93xx_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
@@ -818,6 +1091,7 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
goto fail_put_clock;
}
+ espi->sspdr_phys = res->start + SSPDR;
espi->regs_base = ioremap(res->start, resource_size(res));
if (!espi->regs_base) {
dev_err(&pdev->dev, "failed to map resources\n");
@@ -832,10 +1106,13 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
goto fail_unmap_regs;
}
+ if (info->use_dma && ep93xx_spi_setup_dma(espi))
+ dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
+
espi->wq = create_singlethread_workqueue("ep93xx_spid");
if (!espi->wq) {
dev_err(&pdev->dev, "unable to create workqueue\n");
- goto fail_free_irq;
+ goto fail_free_dma;
}
INIT_WORK(&espi->msg_work, ep93xx_spi_work);
INIT_LIST_HEAD(&espi->msg_queue);
@@ -857,7 +1134,8 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
fail_free_queue:
destroy_workqueue(espi->wq);
-fail_free_irq:
+fail_free_dma:
+ ep93xx_spi_release_dma(espi);
free_irq(espi->irq, espi);
fail_unmap_regs:
iounmap(espi->regs_base);
@@ -901,6 +1179,7 @@ static int __exit ep93xx_spi_remove(struct platform_device *pdev)
}
spin_unlock_irq(&espi->lock);
+ ep93xx_spi_release_dma(espi);
free_irq(espi->irq, espi);
iounmap(espi->regs_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
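
The comment in ep93xx_spi_dma_prepare() above explains why transfers are split into PAGE_SIZE pieces: the single zeropage can only stand in for one page at a time. A stripped-down sketch of that splitting logic, with a hypothetical build_chunked_sg() helper and caller-provided zeropage (not the driver's actual function):

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Sketch only: split 'len' bytes starting at 'buf' into PAGE_SIZE sg entries.
 * When buf is NULL every entry points at the shared zero page, which is why
 * no chunk may be larger than one page.
 */
static int build_chunked_sg(struct sg_table *sgt, const void *buf,
			    size_t len, void *zeropage)
{
	struct scatterlist *sg;
	int i, ret;
	int nents = DIV_ROUND_UP(len, PAGE_SIZE);

	ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
	if (ret)
		return ret;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, PAGE_SIZE);

		if (buf) {
			sg_set_page(sg, virt_to_page(buf), bytes,
				    offset_in_page(buf));
			buf += bytes;
		} else {
			sg_set_page(sg, virt_to_page(zeropage), bytes, 0);
		}
		len -= bytes;
	}
	return 0;
}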
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index a4c42a7..09e8c7d 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2128,8 +2128,8 @@ static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
gsm->tty = NULL;
}
-static unsigned int gsmld_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
struct gsm_mux *gsm = tty->disc_data;
const unsigned char *dp;
@@ -2162,8 +2162,6 @@ static unsigned int gsmld_receive_buf(struct tty_struct *tty,
}
/* FASYNC if needed ? */
/* If clogged call tty_throttle(tty); */
-
- return count;
}
/**
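
As the n_gsm change above (and the matching ones below) shows, ->receive_buf() now returns void: the line discipline can no longer push back by returning a short count, so the caller must limit what it offers. A minimal sketch of a callback written to the new prototype (hypothetical example_* names, not from this patch):

static void example_receive_buf(struct tty_struct *tty, const unsigned char *cp,
				char *fp, int count)
{
	int i;

	/* count is already bounded by tty->receive_room on the caller's side,
	 * so everything offered here may be consumed; nothing is returned.
	 */
	for (i = 0; i < count; i++) {
		char flag = fp ? fp[i] : TTY_NORMAL;

		if (flag == TTY_NORMAL)
			example_handle_char(tty, cp[i]);	/* hypothetical helper */
	}
}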
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index cac6663..cea5603 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -188,8 +188,8 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
poll_table *wait);
static int n_hdlc_tty_open(struct tty_struct *tty);
static void n_hdlc_tty_close(struct tty_struct *tty);
-static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
- const __u8 *cp, char *fp, int count);
+static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *cp,
+ char *fp, int count);
static void n_hdlc_tty_wakeup(struct tty_struct *tty);
#define bset(p,b) ((p)[(b) >> 5] |= (1 << ((b) & 0x1f)))
@@ -509,8 +509,8 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
* Called by tty low level driver when receive data is available. Data is
* interpreted as one HDLC frame.
*/
-static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
- const __u8 *data, char *flags, int count)
+static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
+ char *flags, int count)
{
register struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
register struct n_hdlc_buf *buf;
@@ -521,20 +521,20 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
/* This can happen if stuff comes in on the backup tty */
if (!n_hdlc || tty != n_hdlc->tty)
- return -ENODEV;
+ return;
/* verify line is using HDLC discipline */
if (n_hdlc->magic != HDLC_MAGIC) {
printk("%s(%d) line not using HDLC discipline\n",
__FILE__,__LINE__);
- return -EINVAL;
+ return;
}
if ( count>maxframe ) {
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d) rx count>maxframesize, data discarded\n",
__FILE__,__LINE__);
- return -EINVAL;
+ return;
}
/* get a free HDLC buffer */
@@ -550,7 +550,7 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d) no more rx buffers, data discarded\n",
__FILE__,__LINE__);
- return -EINVAL;
+ return;
}
/* copy received data to HDLC buffer */
@@ -565,8 +565,6 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
if (n_hdlc->tty->fasync != NULL)
kill_fasync (&n_hdlc->tty->fasync, SIGIO, POLL_IN);
- return count;
-
} /* end of n_hdlc_tty_receive() */
/**
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index a4bc39c..5c6c314 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -139,8 +139,8 @@ static int r3964_ioctl(struct tty_struct *tty, struct file *file,
static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old);
static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
struct poll_table_struct *wait);
-static unsigned int r3964_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count);
+static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count);
static struct tty_ldisc_ops tty_ldisc_N_R3964 = {
.owner = THIS_MODULE,
@@ -1239,8 +1239,8 @@ static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
return result;
}
-static unsigned int r3964_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
struct r3964_info *pInfo = tty->disc_data;
const unsigned char *p;
@@ -1257,8 +1257,6 @@ static unsigned int r3964_receive_buf(struct tty_struct *tty,
}
}
-
- return count;
}
MODULE_LICENSE("GPL");
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 95d0a9c..0ad3288 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -81,6 +81,38 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
return put_user(x, ptr);
}
+/**
+ * n_tty_set_room - receive space
+ * @tty: terminal
+ *
+ * Updates tty->receive_room to reflect how much data the driver is
+ * permitted to feed to the line discipline without any being lost,
+ * and thus to manage flow control. Not serialized. Answers for the
+ * "instant".
+ */
+
+static void n_tty_set_room(struct tty_struct *tty)
+{
+ /* tty->read_cnt is not read locked ? */
+ int left = N_TTY_BUF_SIZE - tty->read_cnt - 1;
+ int old_left;
+
+ /*
+ * If we are doing input canonicalization, and there are no
+ * pending newlines, let characters through without limit, so
+ * that erase characters will be handled. Other excess
+ * characters will be beeped.
+ */
+ if (left <= 0)
+ left = tty->icanon && !tty->canon_data;
+ old_left = tty->receive_room;
+ tty->receive_room = left;
+
+ /* Did this open up the receive buffer? We may need to flip */
+ if (left && !old_left)
+ schedule_work(&tty->buf.work);
+}
+
static void put_tty_queue_nolock(unsigned char c, struct tty_struct *tty)
{
if (tty->read_cnt < N_TTY_BUF_SIZE) {
@@ -152,6 +184,7 @@ static void reset_buffer_flags(struct tty_struct *tty)
tty->canon_head = tty->canon_data = tty->erasing = 0;
memset(&tty->read_flags, 0, sizeof tty->read_flags);
+ n_tty_set_room(tty);
check_unthrottle(tty);
}
@@ -1327,19 +1360,17 @@ static void n_tty_write_wakeup(struct tty_struct *tty)
* calls one at a time and in order (or using flush_to_ldisc)
*/
-static unsigned int n_tty_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
const unsigned char *p;
char *f, flags = TTY_NORMAL;
int i;
char buf[64];
unsigned long cpuflags;
- int left;
- int ret = 0;
if (!tty->read_buf)
- return 0;
+ return;
if (tty->real_raw) {
spin_lock_irqsave(&tty->read_lock, cpuflags);
@@ -1349,7 +1380,6 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
memcpy(tty->read_buf + tty->read_head, cp, i);
tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
tty->read_cnt += i;
- ret += i;
cp += i;
count -= i;
@@ -1359,10 +1389,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
memcpy(tty->read_buf + tty->read_head, cp, i);
tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
tty->read_cnt += i;
- ret += i;
spin_unlock_irqrestore(&tty->read_lock, cpuflags);
} else {
- ret = count;
for (i = count, p = cp, f = fp; i; i--, p++) {
if (f)
flags = *f++;
@@ -1390,6 +1418,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
tty->ops->flush_chars(tty);
}
+ n_tty_set_room(tty);
+
if ((!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) ||
L_EXTPROC(tty)) {
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
@@ -1402,12 +1432,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
* mode. We don't want to throttle the driver if we're in
* canonical mode and don't have a newline yet!
*/
- left = N_TTY_BUF_SIZE - tty->read_cnt - 1;
-
- if (left < TTY_THRESHOLD_THROTTLE)
+ if (tty->receive_room < TTY_THRESHOLD_THROTTLE)
tty_throttle(tty);
-
- return ret;
}
int is_ignored(int sig)
@@ -1451,6 +1477,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
if (test_bit(TTY_HW_COOK_IN, &tty->flags)) {
tty->raw = 1;
tty->real_raw = 1;
+ n_tty_set_room(tty);
return;
}
if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) ||
@@ -1503,6 +1530,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
else
tty->real_raw = 0;
}
+ n_tty_set_room(tty);
/* The termios change make the tty ready for I/O */
wake_up_interruptible(&tty->write_wait);
wake_up_interruptible(&tty->read_wait);
@@ -1784,6 +1812,8 @@ do_it_again:
retval = -ERESTARTSYS;
break;
}
+ /* FIXME: does n_tty_set_room need locking ? */
+ n_tty_set_room(tty);
timeout = schedule_timeout(timeout);
continue;
}
@@ -1855,8 +1885,10 @@ do_it_again:
* longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
* we won't get any more characters.
*/
- if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE)
+ if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
+ n_tty_set_room(tty);
check_unthrottle(tty);
+ }
if (b - buf >= minimum)
break;
@@ -1878,6 +1910,7 @@ do_it_again:
} else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
goto do_it_again;
+ n_tty_set_room(tty);
return retval;
}
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 46de2e0..f1a7918 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -416,7 +416,6 @@ static void flush_to_ldisc(struct work_struct *work)
struct tty_buffer *head, *tail = tty->buf.tail;
int seen_tail = 0;
while ((head = tty->buf.head) != NULL) {
- int copied;
int count;
char *char_buf;
unsigned char *flag_buf;
@@ -443,19 +442,17 @@ static void flush_to_ldisc(struct work_struct *work)
line discipline as we want to empty the queue */
if (test_bit(TTY_FLUSHPENDING, &tty->flags))
break;
+ if (!tty->receive_room || seen_tail)
+ break;
+ if (count > tty->receive_room)
+ count = tty->receive_room;
char_buf = head->char_buf_ptr + head->read;
flag_buf = head->flag_buf_ptr + head->read;
+ head->read += count;
spin_unlock_irqrestore(&tty->buf.lock, flags);
- copied = disc->ops->receive_buf(tty, char_buf,
+ disc->ops->receive_buf(tty, char_buf,
flag_buf, count);
spin_lock_irqsave(&tty->buf.lock, flags);
-
- head->read += copied;
-
- if (copied == 0 || seen_tail) {
- schedule_work(&tty->buf.work);
- break;
- }
}
clear_bit(TTY_FLUSHING, &tty->flags);
}
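
Both callers changed in this series (flush_to_ldisc() above and paste_selection() below) follow the same new rule: clamp the byte count to tty->receive_room before calling ->receive_buf(), since the ldisc no longer reports how much it consumed. A minimal caller-side sketch, using a hypothetical push helper:

static void example_push_to_ldisc(struct tty_struct *tty,
				  const unsigned char *buf, int count)
{
	count = min(count, tty->receive_room);	/* never offer more than the ldisc can hold */
	if (!count)
		return;		/* no room: leave the data queued and retry later */

	tty->ldisc->ops->receive_buf(tty, buf, NULL, count);
}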
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 67b1d0d..fb864e7 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -332,7 +332,8 @@ int paste_selection(struct tty_struct *tty)
continue;
}
count = sel_buffer_lth - pasted;
- count = tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
+ count = min(count, tty->receive_room);
+ tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
NULL, count);
pasted += count;
}