Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 7
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 17
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c | 42
-rw-r--r--  drivers/acpi/debugfs.c | 20
-rw-r--r--  drivers/atm/solos-pci.c | 5
-rw-r--r--  drivers/block/floppy.c | 2
-rw-r--r--  drivers/block/loop.c | 5
-rw-r--r--  drivers/bluetooth/ath3k.c | 5
-rw-r--r--  drivers/bluetooth/btusb.c | 12
-rw-r--r--  drivers/char/agp/amd64-agp.c | 9
-rw-r--r--  drivers/char/agp/intel-agp.h | 1
-rw-r--r--  drivers/char/agp/intel-gtt.c | 56
-rw-r--r--  drivers/char/pcmcia/cm4000_cs.c | 3
-rw-r--r--  drivers/char/pcmcia/ipwireless/main.c | 52
-rw-r--r--  drivers/char/tpm/tpm.c | 28
-rw-r--r--  drivers/char/tpm/tpm.h | 2
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 4
-rw-r--r--  drivers/char/virtio_console.c | 8
-rw-r--r--  drivers/cpufreq/cpufreq.c | 27
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 103
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 37
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 5
-rw-r--r--  drivers/hwmon/Kconfig | 19
-rw-r--r--  drivers/hwmon/ad7414.c | 1
-rw-r--r--  drivers/hwmon/adt7411.c | 1
-rw-r--r--  drivers/hwmon/jc42.c | 35
-rw-r--r--  drivers/hwmon/k10temp.c | 5
-rw-r--r--  drivers/hwmon/lm85.c | 23
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 35
-rw-r--r--  drivers/i2c/busses/i2c-stu300.c | 2
-rw-r--r--  drivers/idle/intel_idle.c | 24
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 32
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c | 5
-rw-r--r--  drivers/input/gameport/gameport.c | 2
-rw-r--r--  drivers/input/keyboard/tegra-kbc.c | 62
-rw-r--r--  drivers/input/mouse/synaptics.h | 23
-rw-r--r--  drivers/input/serio/serio.c | 2
-rw-r--r--  drivers/isdn/hardware/eicon/istream.c | 2
-rw-r--r--  drivers/isdn/hisax/isdnl2.c | 28
-rw-r--r--  drivers/md/linear.c | 1
-rw-r--r--  drivers/md/md.c | 31
-rw-r--r--  drivers/md/md.h | 2
-rw-r--r--  drivers/md/multipath.c | 1
-rw-r--r--  drivers/md/raid0.c | 2
-rw-r--r--  drivers/md/raid1.c | 6
-rw-r--r--  drivers/md/raid10.c | 7
-rw-r--r--  drivers/md/raid5.c | 1
-rw-r--r--  drivers/memstick/core/memstick.c | 2
-rw-r--r--  drivers/message/fusion/mptbase.h | 4
-rw-r--r--  drivers/message/fusion/mptctl.c | 8
-rw-r--r--  drivers/message/fusion/mptscsih.c | 7
-rw-r--r--  drivers/mfd/asic3.c | 4
-rw-r--r--  drivers/mfd/davinci_voicecodec.c | 4
-rw-r--r--  drivers/mfd/tps6586x.c | 10
-rw-r--r--  drivers/mfd/ucb1x00-ts.c | 12
-rw-r--r--  drivers/mfd/wm8994-core.c | 18
-rw-r--r--  drivers/misc/bmp085.c | 1
-rw-r--r--  drivers/misc/tifm_core.c | 2
-rw-r--r--  drivers/misc/vmw_balloon.c | 2
-rw-r--r--  drivers/mmc/core/sdio.c | 3
-rw-r--r--  drivers/mmc/host/mmci.c | 345
-rw-r--r--  drivers/mmc/host/mmci.h | 15
-rw-r--r--  drivers/mtd/nand/r852.c | 2
-rw-r--r--  drivers/mtd/sm_ftl.c | 2
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 28
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c | 65
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h | 20
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c | 25
-rw-r--r--  drivers/net/bnx2x/bnx2x_init.h | 2
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c | 18
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c | 4
-rw-r--r--  drivers/net/can/mcp251x.c | 2
-rw-r--r--  drivers/net/can/softing/Kconfig | 2
-rw-r--r--  drivers/net/can/softing/softing_main.c | 1
-rw-r--r--  drivers/net/cnic.c | 33
-rw-r--r--  drivers/net/cxgb4vf/cxgb4vf_main.c | 80
-rw-r--r--  drivers/net/cxgb4vf/t4vf_hw.c | 2
-rw-r--r--  drivers/net/davinci_emac.c | 2
-rw-r--r--  drivers/net/dm9000.c | 9
-rw-r--r--  drivers/net/dnet.c | 3
-rw-r--r--  drivers/net/e1000/e1000_osdep.h | 3
-rw-r--r--  drivers/net/e1000e/netdev.c | 55
-rw-r--r--  drivers/net/fec.c | 3
-rw-r--r--  drivers/net/forcedeth.c | 2
-rw-r--r--  drivers/net/igbvf/vf.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c | 51
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.h | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 6
-rw-r--r--  drivers/net/macb.c | 2
-rw-r--r--  drivers/net/pch_gbe/pch_gbe.h | 2
-rw-r--r--  drivers/net/pch_gbe/pch_gbe_main.c | 104
-rw-r--r--  drivers/net/pcmcia/fmvj18x_cs.c | 1
-rw-r--r--  drivers/net/r8169.c | 50
-rw-r--r--  drivers/net/sfc/ethtool.c | 22
-rw-r--r--  drivers/net/skge.c | 3
-rw-r--r--  drivers/net/stmmac/stmmac_main.c | 4
-rw-r--r--  drivers/net/tg3.c | 8
-rw-r--r--  drivers/net/usb/dm9601.c | 4
-rw-r--r--  drivers/net/usb/hso.c | 12
-rw-r--r--  drivers/net/usb/usbnet.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath5k/phy.c | 143
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/hif_usb.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 8
-rw-r--r--  drivers/net/wireless/ath/carl9170/usb.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c | 67
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 2
-rw-r--r--  drivers/net/wireless/p54/p54pci.c | 14
-rw-r--r--  drivers/net/wireless/p54/p54usb.c | 1
-rw-r--r--  drivers/net/wireless/rndis_wlan.c | 3
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800pci.c | 8
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 6
-rw-r--r--  drivers/nfc/Kconfig | 2
-rw-r--r--  drivers/nfc/pn544.c | 4
-rw-r--r--  drivers/of/pdt.c | 112
-rw-r--r--  drivers/pcmcia/pcmcia_resource.c | 2
-rw-r--r--  drivers/pcmcia/pxa2xx_base.c | 2
-rw-r--r--  drivers/pcmcia/pxa2xx_base.h | 1
-rw-r--r--  drivers/pcmcia/pxa2xx_colibri.c | 3
-rw-r--r--  drivers/pcmcia/pxa2xx_lubbock.c | 1
-rw-r--r--  drivers/platform/x86/Kconfig | 2
-rw-r--r--  drivers/platform/x86/acer-wmi.c | 4
-rw-r--r--  drivers/platform/x86/asus_acpi.c | 8
-rw-r--r--  drivers/platform/x86/dell-laptop.c | 24
-rw-r--r--  drivers/platform/x86/intel_pmic_gpio.c | 116
-rw-r--r--  drivers/platform/x86/tc1100-wmi.c | 2
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 8
-rw-r--r--  drivers/pps/generators/Kconfig | 2
-rw-r--r--  drivers/pps/kapi.c | 2
-rw-r--r--  drivers/rapidio/rio-sysfs.c | 12
-rw-r--r--  drivers/regulator/mc13xxx-regulator-core.c | 2
-rw-r--r--  drivers/regulator/wm831x-dcdc.c | 1
-rw-r--r--  drivers/rtc/Kconfig | 12
-rw-r--r--  drivers/rtc/interface.c | 23
-rw-r--r--  drivers/rtc/rtc-at91sam9.c | 2
-rw-r--r--  drivers/rtc/rtc-dev.c | 104
-rw-r--r--  drivers/rtc/rtc-ds3232.c | 14
-rw-r--r--  drivers/rtc/rtc-s3c.c | 12
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 2
-rw-r--r--  drivers/s390/block/xpram.c | 4
-rw-r--r--  drivers/s390/char/keyboard.c | 3
-rw-r--r--  drivers/s390/char/tape.h | 8
-rw-r--r--  drivers/s390/char/tape_34xx.c | 59
-rw-r--r--  drivers/s390/char/tape_3590.c | 83
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 10
-rw-r--r--  drivers/scsi/scsi_debug.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 2
-rw-r--r--  drivers/spi/pxa2xx_spi_pci.c | 61
-rw-r--r--  drivers/target/Makefile | 3
-rw-r--r--  drivers/target/target_core_configfs.c | 155
-rw-r--r--  drivers/target/target_core_device.c | 13
-rw-r--r--  drivers/target/target_core_fabric_configfs.c | 92
-rw-r--r--  drivers/target/target_core_iblock.c | 8
-rw-r--r--  drivers/target/target_core_mib.c | 1078
-rw-r--r--  drivers/target/target_core_mib.h | 28
-rw-r--r--  drivers/target/target_core_pscsi.c | 4
-rw-r--r--  drivers/target/target_core_tpg.c | 29
-rw-r--r--  drivers/target/target_core_transport.c | 48
-rw-r--r--  drivers/thermal/Kconfig | 1
-rw-r--r--  drivers/thermal/thermal_sys.c | 40
-rw-r--r--  drivers/tty/serial/amba-pl011.c | 511
-rw-r--r--  drivers/tty/serial/max3100.c | 2
-rw-r--r--  drivers/tty/serial/max3107.c | 2
-rw-r--r--  drivers/tty/serial/serial_cs.c | 1
-rw-r--r--  drivers/usb/core/hub.c | 18
-rw-r--r--  drivers/usb/core/quirks.c | 8
-rw-r--r--  drivers/usb/gadget/f_phonet.c | 15
-rw-r--r--  drivers/usb/host/ehci-xilinx-of.c | 1
-rw-r--r--  drivers/usb/host/xhci-dbg.c | 9
-rw-r--r--  drivers/usb/host/xhci-mem.c | 10
-rw-r--r--  drivers/usb/host/xhci-ring.c | 40
-rw-r--r--  drivers/usb/host/xhci.c | 14
-rw-r--r--  drivers/usb/host/xhci.h | 2
-rw-r--r--  drivers/usb/musb/musb_core.c | 1
-rw-r--r--  drivers/usb/musb/musb_core.h | 17
-rw-r--r--  drivers/usb/musb/omap2430.c | 1
-rw-r--r--  drivers/usb/serial/sierra.c | 3
-rw-r--r--  drivers/usb/serial/usb_wwan.c | 15
-rw-r--r--  drivers/usb/serial/visor.c | 12
-rw-r--r--  drivers/video/backlight/ltv350qv.c | 9
-rw-r--r--  drivers/xen/manage.c | 10
215 files changed, 2933 insertions, 2433 deletions
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 54784bb..edc2586 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -416,10 +416,15 @@ struct acpi_gpe_handler_info {
u8 originally_enabled; /* True if GPE was originally enabled */
};
+struct acpi_gpe_notify_object {
+ struct acpi_namespace_node *node;
+ struct acpi_gpe_notify_object *next;
+};
+
union acpi_gpe_dispatch_info {
struct acpi_namespace_node *method_node; /* Method node for this GPE level */
struct acpi_gpe_handler_info *handler; /* Installed GPE handler */
- struct acpi_namespace_node *device_node; /* Parent _PRW device for implicit notify */
+ struct acpi_gpe_notify_object device; /* List of _PRW devices for implicit notify */
};
/*
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 14988a8..f472521 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -457,6 +457,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
acpi_status status;
struct acpi_gpe_event_info *local_gpe_event_info;
struct acpi_evaluate_info *info;
+ struct acpi_gpe_notify_object *notify_object;
ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
@@ -508,10 +509,18 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
* from this thread -- because handlers may in turn run other
* control methods.
*/
- status =
- acpi_ev_queue_notify_request(local_gpe_event_info->dispatch.
- device_node,
- ACPI_NOTIFY_DEVICE_WAKE);
+ status = acpi_ev_queue_notify_request(
+ local_gpe_event_info->dispatch.device.node,
+ ACPI_NOTIFY_DEVICE_WAKE);
+
+ notify_object = local_gpe_event_info->dispatch.device.next;
+ while (ACPI_SUCCESS(status) && notify_object) {
+ status = acpi_ev_queue_notify_request(
+ notify_object->node,
+ ACPI_NOTIFY_DEVICE_WAKE);
+ notify_object = notify_object->next;
+ }
+
break;
case ACPI_GPE_DISPATCH_METHOD:
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 3b20a34..52aaff3 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -198,7 +198,9 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
acpi_status status = AE_BAD_PARAMETER;
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_namespace_node *device_node;
+ struct acpi_gpe_notify_object *notify_object;
acpi_cpu_flags flags;
+ u8 gpe_dispatch_mask;
ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
@@ -221,27 +223,49 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
goto unlock_and_exit;
}
+ if (wake_device == ACPI_ROOT_OBJECT) {
+ goto out;
+ }
+
/*
* If there is no method or handler for this GPE, then the
* wake_device will be notified whenever this GPE fires (aka
* "implicit notify") Note: The GPE is assumed to be
* level-triggered (for windows compatibility).
*/
- if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
- ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) {
+ gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK;
+ if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE
+ && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) {
+ goto out;
+ }
- /* Validate wake_device is of type Device */
+ /* Validate wake_device is of type Device */
- device_node = ACPI_CAST_PTR(struct acpi_namespace_node,
- wake_device);
- if (device_node->type != ACPI_TYPE_DEVICE) {
- goto unlock_and_exit;
- }
+ device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
+ if (device_node->type != ACPI_TYPE_DEVICE) {
+ goto unlock_and_exit;
+ }
+
+ if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) {
gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
ACPI_GPE_LEVEL_TRIGGERED);
- gpe_event_info->dispatch.device_node = device_node;
+ gpe_event_info->dispatch.device.node = device_node;
+ gpe_event_info->dispatch.device.next = NULL;
+ } else {
+ /* There are multiple devices to notify implicitly. */
+
+ notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object));
+ if (!notify_object) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ notify_object->node = device_node;
+ notify_object->next = gpe_event_info->dispatch.device.next;
+ gpe_event_info->dispatch.device.next = notify_object;
}
+ out:
gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
status = AE_OK;
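
The evxfgpe.c change above turns the single implicit-notify target into a list: the first wake device sits in the embedded dispatch.device node, and each further device is prepended onto dispatch.device.next; the evgpe.c hunk then walks that list when the GPE fires. A minimal user-space sketch of the same prepend-and-walk pattern (hypothetical types, not the ACPICA code itself):

#include <stdio.h>
#include <stdlib.h>

struct notify_node {
	const char *device;		/* stands in for an ACPI namespace node */
	struct notify_node *next;
};

struct notify_head {
	const char *device;		/* first implicit-notify target */
	struct notify_node *next;	/* additional targets, newest first */
};

/* Prepend one more target; 0 on success, -1 if allocation fails. */
static int notify_add(struct notify_head *head, const char *device)
{
	struct notify_node *n = calloc(1, sizeof(*n));

	if (!n)
		return -1;
	n->device = device;
	n->next = head->next;
	head->next = n;
	return 0;
}

int main(void)
{
	struct notify_head head = { .device = "dev0", .next = NULL };

	notify_add(&head, "dev1");
	notify_add(&head, "dev2");

	/* Notify the head node first, then the extra entries. */
	printf("notify %s\n", head.device);
	for (struct notify_node *n = head.next; n; n = n->next)
		printf("notify %s\n", n->device);
	return 0;
}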
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 5df67f1..384f7ab 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -26,7 +26,9 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
size_t count, loff_t *ppos)
{
static char *buf;
- static int uncopied_bytes;
+ static u32 max_size;
+ static u32 uncopied_bytes;
+
struct acpi_table_header table;
acpi_status status;
@@ -37,19 +39,24 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
if (copy_from_user(&table, user_buf,
sizeof(struct acpi_table_header)))
return -EFAULT;
- uncopied_bytes = table.length;
- buf = kzalloc(uncopied_bytes, GFP_KERNEL);
+ uncopied_bytes = max_size = table.length;
+ buf = kzalloc(max_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
}
- if (uncopied_bytes < count) {
- kfree(buf);
+ if (buf == NULL)
+ return -EINVAL;
+
+ if ((*ppos > max_size) ||
+ (*ppos + count > max_size) ||
+ (*ppos + count < count) ||
+ (count > uncopied_bytes))
return -EINVAL;
- }
if (copy_from_user(buf + (*ppos), user_buf, count)) {
kfree(buf);
+ buf = NULL;
return -EFAULT;
}
@@ -59,6 +66,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
if (!uncopied_bytes) {
status = acpi_install_method(buf);
kfree(buf);
+ buf = NULL;
if (ACPI_FAILURE(status))
return -EINVAL;
add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
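
The reworked cm_write() above only accepts a chunk that stays inside the table announced by the first write: the file offset and offset+count must not exceed max_size, the offset+count < count test catches unsigned wraparound, and count may not exceed the bytes still outstanding. A stand-alone sketch of that validation (hypothetical helper, not the kernel function):

#include <stdbool.h>
#include <stdint.h>

/*
 * Accept a write of 'count' bytes at offset 'pos' only if it stays inside a
 * 'max_size'-byte buffer of which 'remaining' bytes are still expected.
 */
static bool chunk_ok(uint64_t pos, uint64_t count, uint32_t max_size,
		     uint32_t remaining)
{
	if (pos > max_size)
		return false;
	if (pos + count > max_size)	/* would run past the end */
		return false;
	if (pos + count < count)	/* unsigned overflow of pos + count */
		return false;
	if (count > remaining)		/* more data than is still expected */
		return false;
	return true;
}

int main(void)
{
	/* in-bounds chunk accepted, out-of-bounds chunk rejected */
	return (chunk_ok(0, 16, 64, 64) && !chunk_ok(60, 16, 64, 64)) ? 0 : 1;
}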
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 73fb1c4..25ef1a4 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -866,8 +866,9 @@ static int popen(struct atm_vcc *vcc)
}
skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
- if (!skb && net_ratelimit()) {
- dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
+ if (!skb) {
+ if (net_ratelimit())
+ dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
return -ENOMEM;
}
header = (void *)skb_put(skb, sizeof(*header));
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b9ba04f..77fc76f 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3281,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
struct block_device *bdev = opened_bdev[cnt];
if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
continue;
- __invalidate_device(bdev);
+ __invalidate_device(bdev, true);
}
mutex_unlock(&open_lock);
} else {
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 49e6a54..dbf31ec 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -78,7 +78,6 @@
#include <asm/uaccess.h>
-static DEFINE_MUTEX(loop_mutex);
static LIST_HEAD(loop_devices);
static DEFINE_MUTEX(loop_devices_mutex);
@@ -1501,11 +1500,9 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
{
struct loop_device *lo = bdev->bd_disk->private_data;
- mutex_lock(&loop_mutex);
mutex_lock(&lo->lo_ctl_mutex);
lo->lo_refcnt++;
mutex_unlock(&lo->lo_ctl_mutex);
- mutex_unlock(&loop_mutex);
return 0;
}
@@ -1515,7 +1512,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
struct loop_device *lo = disk->private_data;
int err;
- mutex_lock(&loop_mutex);
mutex_lock(&lo->lo_ctl_mutex);
if (--lo->lo_refcnt)
@@ -1540,7 +1536,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
out:
mutex_unlock(&lo->lo_ctl_mutex);
out_unlocked:
- mutex_unlock(&loop_mutex);
return 0;
}
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a126e61..6dcd55a 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -39,6 +39,11 @@ static struct usb_device_id ath3k_table[] = {
/* Atheros AR3011 with sflash firmware*/
{ USB_DEVICE(0x0CF3, 0x3002) },
+ /* Atheros AR9285 Malbec with sflash firmware */
+ { USB_DEVICE(0x03F0, 0x311D) },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE02C) },
{ } /* Terminating entry */
};
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 1da773f8..700a384 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -102,6 +102,12 @@ static struct usb_device_id blacklist_table[] = {
/* Atheros 3011 with sflash firmware */
{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
+ /* Atheros AR9285 Malbec with sflash firmware */
+ { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
@@ -826,7 +832,7 @@ static void btusb_work(struct work_struct *work)
if (hdev->conn_hash.sco_num > 0) {
if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
- err = usb_autopm_get_interface(data->isoc);
+ err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf);
if (err < 0) {
clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
usb_kill_anchored_urbs(&data->isoc_anchor);
@@ -855,7 +861,7 @@ static void btusb_work(struct work_struct *work)
__set_isoc_interface(hdev, 0);
if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags))
- usb_autopm_put_interface(data->isoc);
+ usb_autopm_put_interface(data->isoc ? data->isoc : data->intf);
}
}
@@ -1038,8 +1044,6 @@ static int btusb_probe(struct usb_interface *intf,
usb_set_intfdata(intf, data);
- usb_enable_autosuspend(interface_to_usbdev(intf));
-
return 0;
}
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 9252e85..780498d 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -773,18 +773,23 @@ int __init agp_amd64_init(void)
#else
printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
#endif
+ pci_unregister_driver(&agp_amd64_pci_driver);
return -ENODEV;
}
/* First check that we have at least one AMD64 NB */
- if (!pci_dev_present(amd_nb_misc_ids))
+ if (!pci_dev_present(amd_nb_misc_ids)) {
+ pci_unregister_driver(&agp_amd64_pci_driver);
return -ENODEV;
+ }
/* Look for any AGP bridge */
agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
err = driver_attach(&agp_amd64_pci_driver.driver);
- if (err == 0 && agp_bridges_found == 0)
+ if (err == 0 && agp_bridges_found == 0) {
+ pci_unregister_driver(&agp_amd64_pci_driver);
err = -ENODEV;
+ }
}
return err;
}
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index c195bfe..5feebe2 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -130,6 +130,7 @@
#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
#define I915_IFPADDR 0x60
+#define I830_HIC 0x70
/* Intel 965G registers */
#define I965_MSAC 0x62
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index fab3d32..0d09b53 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
+#include <linux/delay.h>
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
@@ -70,12 +71,8 @@ static struct _intel_private {
u32 __iomem *gtt; /* I915G */
bool clear_fake_agp; /* on first access via agp, fill with scratch */
int num_dcache_entries;
- union {
- void __iomem *i9xx_flush_page;
- void *i8xx_flush_page;
- };
+ void __iomem *i9xx_flush_page;
char *i81x_gtt_table;
- struct page *i8xx_page;
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
@@ -722,28 +719,6 @@ static int intel_fake_agp_fetch_size(void)
static void i830_cleanup(void)
{
- if (intel_private.i8xx_flush_page) {
- kunmap(intel_private.i8xx_flush_page);
- intel_private.i8xx_flush_page = NULL;
- }
-
- __free_page(intel_private.i8xx_page);
- intel_private.i8xx_page = NULL;
-}
-
-static void intel_i830_setup_flush(void)
-{
- /* return if we've already set the flush mechanism up */
- if (intel_private.i8xx_page)
- return;
-
- intel_private.i8xx_page = alloc_page(GFP_KERNEL);
- if (!intel_private.i8xx_page)
- return;
-
- intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
- if (!intel_private.i8xx_flush_page)
- i830_cleanup();
}
/* The chipset_flush interface needs to get data that has already been
@@ -758,14 +733,27 @@ static void intel_i830_setup_flush(void)
*/
static void i830_chipset_flush(void)
{
- unsigned int *pg = intel_private.i8xx_flush_page;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ /* Forcibly evict everything from the CPU write buffers.
+ * clflush appears to be insufficient.
+ */
+ wbinvd_on_all_cpus();
+
+ /* Now we've only seen documents for this magic bit on 855GM,
+ * we hope it exists for the other gen2 chipsets...
+ *
+ * Also works as advertised on my 845G.
+ */
+ writel(readl(intel_private.registers+I830_HIC) | (1<<31),
+ intel_private.registers+I830_HIC);
- memset(pg, 0, 1024);
+ while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
+ if (time_after(jiffies, timeout))
+ break;
- if (cpu_has_clflush)
- clflush_cache_range(pg, 1024);
- else if (wbinvd_on_all_cpus() != 0)
- printk(KERN_ERR "Timed out waiting for cache flush.\n");
+ udelay(50);
+ }
}
static void i830_write_entry(dma_addr_t addr, unsigned int entry,
@@ -849,8 +837,6 @@ static int i830_setup(void)
intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
- intel_i830_setup_flush();
-
return 0;
}
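
The replacement i830_chipset_flush() above flushes the CPU write buffers with wbinvd_on_all_cpus(), sets bit 31 of I830_HIC, and polls until the hardware clears it, giving up after roughly one second so chipsets that lack the bit cannot hang the machine. A user-space sketch of that bounded-poll shape (the poll callback and timings are illustrative only):

#include <stdbool.h>
#include <time.h>
#include <unistd.h>

static bool flush_done(void)
{
	return true;			/* stand-in for "bit 31 reads back as 0" */
}

/* Poll until flush_done() or ~1 second has passed; true on success. */
static bool wait_for_flush(void)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (!flush_done()) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000000000L +
		    (now.tv_nsec - start.tv_nsec) > 1000000000L)
			return false;	/* like the jiffies timeout in the patch */
		usleep(50);		/* mirrors the udelay(50) between reads */
	}
	return true;
}

int main(void)
{
	return wait_for_flush() ? 0 : 1;
}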
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 777181a..bcbbc71 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -830,8 +830,7 @@ static void monitor_card(unsigned long p)
test_bit(IS_ANY_T1, &dev->flags))) {
DEBUGP(4, dev, "Perform AUTOPPS\n");
set_bit(IS_AUTOPPS_ACT, &dev->flags);
- ptsreq.protocol = ptsreq.protocol =
- (0x01 << dev->proto);
+ ptsreq.protocol = (0x01 << dev->proto);
ptsreq.flags = 0x01;
ptsreq.pts1 = 0x00;
ptsreq.pts2 = 0x00;
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c
index 94b8eb4..444155a 100644
--- a/drivers/char/pcmcia/ipwireless/main.c
+++ b/drivers/char/pcmcia/ipwireless/main.c
@@ -78,7 +78,6 @@ static void signalled_reboot_callback(void *callback_data)
static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
{
struct ipw_dev *ipw = priv_data;
- struct resource *io_resource;
int ret;
p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
@@ -92,9 +91,12 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
if (ret)
return ret;
- io_resource = request_region(p_dev->resource[0]->start,
- resource_size(p_dev->resource[0]),
- IPWIRELESS_PCCARD_NAME);
+ if (!request_region(p_dev->resource[0]->start,
+ resource_size(p_dev->resource[0]),
+ IPWIRELESS_PCCARD_NAME)) {
+ ret = -EBUSY;
+ goto exit;
+ }
p_dev->resource[2]->flags |=
WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
@@ -105,22 +107,25 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr);
if (ret != 0)
- goto exit2;
+ goto exit1;
ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100;
- ipw->attr_memory = ioremap(p_dev->resource[2]->start,
+ ipw->common_memory = ioremap(p_dev->resource[2]->start,
resource_size(p_dev->resource[2]));
- request_mem_region(p_dev->resource[2]->start,
- resource_size(p_dev->resource[2]),
- IPWIRELESS_PCCARD_NAME);
+ if (!request_mem_region(p_dev->resource[2]->start,
+ resource_size(p_dev->resource[2]),
+ IPWIRELESS_PCCARD_NAME)) {
+ ret = -EBUSY;
+ goto exit2;
+ }
p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM |
WIN_ENABLE;
p_dev->resource[3]->end = 0; /* this used to be 0x1000 */
ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0);
if (ret != 0)
- goto exit2;
+ goto exit3;
ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0);
if (ret != 0)
@@ -128,23 +133,28 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
ipw->attr_memory = ioremap(p_dev->resource[3]->start,
resource_size(p_dev->resource[3]));
- request_mem_region(p_dev->resource[3]->start,
- resource_size(p_dev->resource[3]),
- IPWIRELESS_PCCARD_NAME);
+ if (!request_mem_region(p_dev->resource[3]->start,
+ resource_size(p_dev->resource[3]),
+ IPWIRELESS_PCCARD_NAME)) {
+ ret = -EBUSY;
+ goto exit4;
+ }
return 0;
+exit4:
+ iounmap(ipw->attr_memory);
exit3:
+ release_mem_region(p_dev->resource[2]->start,
+ resource_size(p_dev->resource[2]));
exit2:
- if (ipw->common_memory) {
- release_mem_region(p_dev->resource[2]->start,
- resource_size(p_dev->resource[2]));
- iounmap(ipw->common_memory);
- }
+ iounmap(ipw->common_memory);
exit1:
- release_resource(io_resource);
+ release_region(p_dev->resource[0]->start,
+ resource_size(p_dev->resource[0]));
+exit:
pcmcia_disable_device(p_dev);
- return -1;
+ return ret;
}
static int config_ipwireless(struct ipw_dev *ipw)
@@ -219,6 +229,8 @@ exit:
static void release_ipwireless(struct ipw_dev *ipw)
{
+ release_region(ipw->link->resource[0]->start,
+ resource_size(ipw->link->resource[0]));
if (ipw->common_memory) {
release_mem_region(ipw->link->resource[2]->start,
resource_size(ipw->link->resource[2]));
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index faf5a2c..1f46f1c 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -364,14 +364,12 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
tpm_protected_ordinal_duration[ordinal &
TPM_PROTECTED_ORDINAL_MASK];
- if (duration_idx != TPM_UNDEFINED) {
+ if (duration_idx != TPM_UNDEFINED)
duration = chip->vendor.duration[duration_idx];
- /* if duration is 0, it's because chip->vendor.duration wasn't */
- /* filled yet, so we set the lowest timeout just to give enough */
- /* time for tpm_get_timeouts() to succeed */
- return (duration <= 0 ? HZ : duration);
- } else
+ if (duration <= 0)
return 2 * 60 * HZ;
+ else
+ return duration;
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
@@ -577,11 +575,9 @@ duration:
if (rc)
return;
- if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
- be32_to_cpu(tpm_cmd.header.out.length)
- != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
+ if (be32_to_cpu(tpm_cmd.header.out.return_code)
+ != 3 * sizeof(u32))
return;
-
duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
chip->vendor.duration[TPM_SHORT] =
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
@@ -941,18 +937,6 @@ ssize_t tpm_show_caps_1_2(struct device * dev,
}
EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
-ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d %d %d\n",
- jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
- jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
- jiffies_to_usecs(chip->vendor.duration[TPM_LONG]));
-}
-EXPORT_SYMBOL_GPL(tpm_show_timeouts);
-
ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index d84ff77..72ddb03 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -56,8 +56,6 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr,
char *);
extern ssize_t tpm_show_temp_deactivated(struct device *,
struct device_attribute *attr, char *);
-extern ssize_t tpm_show_timeouts(struct device *,
- struct device_attribute *attr, char *);
struct tpm_chip;
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 0d1d38e..dd21df5 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -376,7 +376,6 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
-static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
static struct attribute *tis_attrs[] = {
&dev_attr_pubek.attr,
@@ -386,8 +385,7 @@ static struct attribute *tis_attrs[] = {
&dev_attr_owned.attr,
&dev_attr_temp_deactivated.attr,
&dev_attr_caps.attr,
- &dev_attr_cancel.attr,
- &dev_attr_timeouts.attr, NULL,
+ &dev_attr_cancel.attr, NULL,
};
static struct attribute_group tis_attr_grp = {
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 4903931..84b164d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -388,6 +388,10 @@ static void discard_port_data(struct port *port)
unsigned int len;
int ret;
+ if (!port->portdev) {
+ /* Device has been unplugged. vqs are already gone. */
+ return;
+ }
vq = port->in_vq;
if (port->inbuf)
buf = port->inbuf;
@@ -470,6 +474,10 @@ static void reclaim_consumed_buffers(struct port *port)
void *buf;
unsigned int len;
+ if (!port->portdev) {
+ /* Device has been unplugged. vqs are already gone. */
+ return;
+ }
while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
kfree(buf);
port->outvq_full = false;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1109f68..5cb4d09 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1919,8 +1919,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
ret = sysdev_driver_register(&cpu_sysdev_class,
&cpufreq_sysdev_driver);
+ if (ret)
+ goto err_null_driver;
- if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
+ if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
int i;
ret = -ENODEV;
@@ -1935,21 +1937,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (ret) {
dprintk("no CPU initialized for driver %s\n",
driver_data->name);
- sysdev_driver_unregister(&cpu_sysdev_class,
- &cpufreq_sysdev_driver);
-
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- cpufreq_driver = NULL;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ goto err_sysdev_unreg;
}
}
- if (!ret) {
- register_hotcpu_notifier(&cpufreq_cpu_notifier);
- dprintk("driver %s up and running\n", driver_data->name);
- cpufreq_debug_enable_ratelimit();
- }
+ register_hotcpu_notifier(&cpufreq_cpu_notifier);
+ dprintk("driver %s up and running\n", driver_data->name);
+ cpufreq_debug_enable_ratelimit();
+ return 0;
+err_sysdev_unreg:
+ sysdev_driver_unregister(&cpu_sysdev_class,
+ &cpufreq_sysdev_driver);
+err_null_driver:
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver = NULL;
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
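
The cpufreq_register_driver() change above replaces the nested if (!ret) blocks with the usual error-unwind layout: each failure jumps to a label that undoes only what was already set up, and the success path runs straight through. A compact illustration of the pattern with hypothetical setup steps:

#include <stdio.h>

static int step_a(void) { return 0; }	/* succeeds */
static int step_b(void) { return -1; }	/* fails, forcing the unwind path */
static void undo_a(void) { puts("undo a"); }

static int register_thing(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto err_out;

	ret = step_b();
	if (ret)
		goto err_undo_a;

	puts("up and running");
	return 0;

err_undo_a:
	undo_a();			/* unwind in reverse order of setup */
err_out:
	return ret;
}

int main(void)
{
	return register_thing() ? 1 : 0;
}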
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 6977a1c..f73ef43 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -672,7 +672,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
struct drm_crtc_helper_funcs *crtc_funcs;
u16 *red, *green, *blue, *transp;
struct drm_crtc *crtc;
- int i, rc = 0;
+ int i, j, rc = 0;
int start;
for (i = 0; i < fb_helper->crtc_count; i++) {
@@ -685,7 +685,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
transp = cmap->transp;
start = cmap->start;
- for (i = 0; i < cmap->len; i++) {
+ for (j = 0; j < cmap->len; j++) {
u16 hred, hgreen, hblue, htransp = 0xffff;
hred = *red++;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3dadfa2..28d1d3c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -164,8 +164,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* available. In that case we can't account for this and just
* hope for the best.
*/
- if ((vblrc > 0) && (abs(diff_ns) > 1000000))
+ if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
atomic_inc(&dev->_vblank_count[crtc]);
+ smp_mb__after_atomic_inc();
+ }
/* Invalidate all timestamps while vblank irq's are off. */
clear_vblank_timestamps(dev, crtc);
@@ -491,6 +493,12 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
/* Dot clock in Hz: */
dotclock = (u64) crtc->hwmode.clock * 1000;
+ /* Fields of interlaced scanout modes are only halve a frame duration.
+ * Double the dotclock to get halve the frame-/line-/pixelduration.
+ */
+ if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
+ dotclock *= 2;
+
/* Valid dotclock? */
if (dotclock > 0) {
/* Convert scanline length in pixels and video dot clock to
@@ -603,14 +611,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
return -EAGAIN;
}
- /* Don't know yet how to handle interlaced or
- * double scan modes. Just no-op for now.
- */
- if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
- DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
- return -ENOTSUPP;
- }
-
/* Get current scanout position with system timestamp.
* Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
* if single query takes longer than max_error nanoseconds.
@@ -858,10 +858,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
if (rc) {
tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
vblanktimestamp(dev, crtc, tslot) = t_vblank;
- smp_wmb();
}
+ smp_mb__before_atomic_inc();
atomic_add(diff, &dev->_vblank_count[crtc]);
+ smp_mb__after_atomic_inc();
}
/**
@@ -1011,7 +1012,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
- int crtc, ret = 0;
+ int ret = 0;
+ unsigned int crtc;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
@@ -1293,15 +1295,16 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
* e.g., due to spurious vblank interrupts. We need to
* ignore those for accounting.
*/
- if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+ if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
/* Store new timestamp in ringbuffer. */
vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
- smp_wmb();
/* Increment cooked vblank count. This also atomically commits
* the timestamp computed above.
*/
+ smp_mb__before_atomic_inc();
atomic_inc(&dev->_vblank_count[crtc]);
+ smp_mb__after_atomic_inc();
} else {
DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
crtc, (int) diff_ns);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3601466..4ff9b6c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
int max_freq;
/* RPSTAT1 is in the GT power well */
- __gen6_force_wake_get(dev_priv);
+ __gen6_gt_force_wake_get(dev_priv);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
@@ -888,7 +888,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
max_freq * 100);
- __gen6_force_wake_put(dev_priv);
+ __gen6_gt_force_wake_put(dev_priv);
} else {
seq_printf(m, "no P-state info available\n");
}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 17bd766..e33d9be 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1895,6 +1895,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_GEN2(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+ /* 965GM sometimes incorrectly writes to hardware status page (HWS)
+ * using 32bit addressing, overwriting memory if HWS is located
+ * above 4GB.
+ *
+ * The documentation also mentions an issue with undefined
+ * behaviour if any general state is accessed within a page above 4GB,
+ * which also needs to be handled carefully.
+ */
+ if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+ dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
mmio_bar = IS_GEN2(dev) ? 1 : 0;
dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
if (!dev_priv->regs) {
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0ad533f..22ec066 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -46,6 +46,9 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
unsigned int i915_powersave = 1;
module_param_named(powersave, i915_powersave, int, 0600);
+unsigned int i915_semaphores = 0;
+module_param_named(semaphores, i915_semaphores, int, 0600);
+
unsigned int i915_enable_rc6 = 0;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
@@ -254,7 +257,7 @@ void intel_detect_pch (struct drm_device *dev)
}
}
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
@@ -270,12 +273,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
udelay(10);
}
-void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
}
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+ int loop = 500;
+ u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ while (fifo < 20 && loop--) {
+ udelay(10);
+ fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ }
+}
+
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 65dfe81..456f404 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -956,6 +956,7 @@ extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
extern unsigned int i915_powersave;
+extern unsigned int i915_semaphores;
extern unsigned int i915_lvds_downclock;
extern unsigned int i915_panel_use_ssc;
extern unsigned int i915_enable_rc6;
@@ -1177,6 +1178,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
+uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+
/* i915_gem_gtt.c */
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
@@ -1353,22 +1357,32 @@ __i915_write(64, q)
* must be set to prevent GT core from power down and stale values being
* returned.
*/
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
-void __gen6_force_wake_put (struct drm_i915_private *dev_priv);
-static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+
+static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val;
if (dev_priv->info->gen >= 6) {
- __gen6_force_wake_get(dev_priv);
+ __gen6_gt_force_wake_get(dev_priv);
val = I915_READ(reg);
- __gen6_force_wake_put(dev_priv);
+ __gen6_gt_force_wake_put(dev_priv);
} else
val = I915_READ(reg);
return val;
}
+static inline void i915_gt_write(struct drm_i915_private *dev_priv,
+ u32 reg, u32 val)
+{
+ if (dev_priv->info->gen >= 6)
+ __gen6_gt_wait_for_fifo(dev_priv);
+ I915_WRITE(reg, val);
+}
+
static inline void
i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
{
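
The renamed helpers above make the guard explicit: on gen6+ hardware i915_gt_read() brackets the register read with __gen6_gt_force_wake_get()/__gen6_gt_force_wake_put(), and i915_gt_write() first waits for free GT FIFO entries; the intel_ringbuffer.h hunk further down switches the ring register macros onto these wrappers. A schematic of the wrapper idea with stand-in hooks (the hook names are hypothetical):

#include <stdint.h>

/* Stand-ins for the real MMIO accessors and power/FIFO guards. */
static uint32_t raw_read(uint32_t reg) { (void)reg; return 0; }
static void raw_write(uint32_t reg, uint32_t val) { (void)reg; (void)val; }
static void force_wake_get(void) { }
static void force_wake_put(void) { }
static void wait_for_fifo(void) { }

static int needs_guard = 1;		/* think "gen >= 6" */

static uint32_t gt_read(uint32_t reg)
{
	uint32_t val;

	if (needs_guard) {
		force_wake_get();	/* keep the GT awake for the read */
		val = raw_read(reg);
		force_wake_put();
	} else {
		val = raw_read(reg);
	}
	return val;
}

static void gt_write(uint32_t reg, uint32_t val)
{
	if (needs_guard)
		wait_for_fifo();	/* avoid overflowing the GT write FIFO */
	raw_write(reg, val);
}

int main(void)
{
	gt_write(0x1000, gt_read(0x1000));
	return 0;
}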
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cf4f74c..36e66cc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1398,7 +1398,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
* Return the required GTT alignment for an object, only taking into account
* unfenced tiled surface requirements.
*/
-static uint32_t
+uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d2f445e..50ab161 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -772,8 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
if (from == NULL || to == from)
return 0;
- /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
- if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
+ /* XXX gpu semaphores are implicated in various hard hangs on SNB */
+ if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
return i915_gem_object_wait_rendering(obj, true);
idx = intel_ring_sync_index(from, to);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9..d64843e 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -349,14 +349,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
- obj->tiling_changed = true;
- obj->tiling_mode = args->tiling_mode;
- obj->stride = args->stride;
+ /* Rebind if we need a change of alignment */
+ if (!obj->map_and_fenceable) {
+ u32 unfenced_alignment =
+ i915_gem_get_unfenced_gtt_alignment(obj);
+ if (obj->gtt_offset & (unfenced_alignment - 1))
+ ret = i915_gem_object_unbind(obj);
+ }
+
+ if (ret == 0) {
+ obj->tiling_changed = true;
+ obj->tiling_mode = args->tiling_mode;
+ obj->stride = args->stride;
+ }
}
+ /* we have to maintain this existing ABI... */
+ args->stride = obj->stride;
+ args->tiling_mode = obj->tiling_mode;
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
- return 0;
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 97f946dc..8a9e08b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -316,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
+ DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->hot_plug)
encoder->hot_plug(encoder);
@@ -1649,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
} else {
hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
- hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
- I915_WRITE(FDI_RXA_IMR, 0);
- I915_WRITE(FDI_RXB_IMR, 0);
+ hotplug_mask |= SDE_AUX_MASK;
}
dev_priv->pch_irq_mask = ~hotplug_mask;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 15d94c6..3e6f486 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1553,17 +1553,7 @@
/* Backlight control */
#define BLC_PWM_CTL 0x61254
-#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
#define BLC_PWM_CTL2 0x61250 /* 965+ only */
-#define BLM_COMBINATION_MODE (1 << 30)
-/*
- * This is the most significant 15 bits of the number of backlight cycles in a
- * complete cycle of the modulated backlight control.
- *
- * The actual value is this field multiplied by two.
- */
-#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
-#define BLM_LEGACY_MODE (1 << 16)
/*
* This is the number of cycles out of the backlight modulation cycle for which
* the backlight is on.
@@ -3271,6 +3261,8 @@
#define FORCEWAKE 0xA18C
#define FORCEWAKE_ACK 0x130090
+#define GT_FIFO_FREE_ENTRIES 0x120008
+
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3b00653..49fb54f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1219,7 +1219,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
u32 blt_ecoskpd;
/* Make sure blitter notifies FBC of writes */
- __gen6_force_wake_get(dev_priv);
+ __gen6_gt_force_wake_get(dev_priv);
blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT;
@@ -1230,7 +1230,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
GEN6_BLITTER_LOCK_SHIFT);
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
POSTING_READ(GEN6_BLITTER_ECOSKPD);
- __gen6_force_wake_put(dev_priv);
+ __gen6_gt_force_wake_put(dev_priv);
}
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
@@ -1630,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
wait_event(dev_priv->pending_flip_queue,
+ atomic_read(&dev_priv->mm.wedged) ||
atomic_read(&obj->pending_flip) == 0);
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
* framebuffer.
+ *
+ * This should only fail upon a hung GPU, in which case we
+ * can safely continue.
*/
ret = i915_gem_object_flush_gpu(obj, false);
- if (ret) {
- i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ (void) ret;
}
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -2045,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
atomic_read(&obj->pending_flip) == 0);
}
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+
+ /*
+ * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+ * must be driven by its own crtc; no sharing is possible.
+ */
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ if (encoder->base.crtc != crtc)
+ continue;
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_EDP:
+ if (!intel_encoder_is_pch_edp(&encoder->base))
+ return false;
+ continue;
+ }
+ }
+
+ return true;
+}
+
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -2053,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 reg, temp;
+ bool is_pch_port = false;
if (intel_crtc->active)
return;
@@ -2066,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
}
- ironlake_fdi_enable(crtc);
+ is_pch_port = intel_crtc_driving_pch(crtc);
+
+ if (is_pch_port)
+ ironlake_fdi_enable(crtc);
+ else {
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+ POSTING_READ(reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(0x7 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(100);
+
+ /* Ironlake workaround, disable clock pointer after downing FDI */
+ if (HAS_PCH_IBX(dev))
+ I915_WRITE(FDI_RX_CHICKEN(pipe),
+ I915_READ(FDI_RX_CHICKEN(pipe) &
+ ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(100);
+ }
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
@@ -2100,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_flush_display_plane(dev, plane);
}
+ /* Skip the PCH stuff if possible */
+ if (!is_pch_port)
+ goto done;
+
/* For PCH output, training FDI link */
if (IS_GEN6(dev))
gen6_fdi_link_train(crtc);
@@ -2184,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(reg, temp | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
-
+done:
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
intel_crtc_update_cursor(crtc, true);
@@ -6203,7 +6282,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
* userspace...
*/
I915_WRITE(GEN6_RC_STATE, 0);
- __gen6_force_wake_get(dev_priv);
+ __gen6_gt_force_wake_get(dev_priv);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -6301,7 +6380,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
- __gen6_force_wake_put(dev_priv);
+ __gen6_gt_force_wake_put(dev_priv);
}
void intel_enable_clock_gating(struct drm_device *dev)
@@ -6496,7 +6575,7 @@ static void ironlake_disable_rc6(struct drm_device *dev)
POSTING_READ(RSTDBYCTL);
}
- ironlake_disable_rc6(dev);
+ ironlake_teardown_rc6(dev);
}
static int ironlake_setup_rc6(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index c65992d..d860abe 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,8 +30,6 @@
#include "intel_drv.h"
-#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
-
void
intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
@@ -112,19 +110,6 @@ done:
dev_priv->pch_pf_size = (width << 16) | height;
}
-static int is_backlight_combination_mode(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (INTEL_INFO(dev)->gen >= 4)
- return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
-
- if (IS_GEN2(dev))
- return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
-
- return 0;
-}
-
static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -181,9 +166,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 4)
max &= ~1;
}
-
- if (is_backlight_combination_mode(dev))
- max *= 0xff;
}
DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
@@ -201,15 +183,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
if (IS_PINEVIEW(dev))
val >>= 1;
-
- if (is_backlight_combination_mode(dev)){
- u8 lbpc;
-
- val &= ~1;
- pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
- val *= lbpc;
- val >>= 1;
- }
}
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
@@ -232,16 +205,6 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
if (HAS_PCH_SPLIT(dev))
return intel_pch_panel_set_backlight(dev, level);
-
- if (is_backlight_combination_mode(dev)){
- u32 max = intel_panel_get_max_backlight(dev);
- u8 lpbc;
-
- lpbc = level * 0xfe / max + 1;
- level /= lpbc;
- pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
- }
-
tmp = I915_READ(BLC_PWM_CTL);
if (IS_PINEVIEW(dev)) {
tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6d6fde8..3430686 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,22 +14,23 @@ struct intel_hw_status_page {
struct drm_i915_gem_object *obj;
};
-#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
+#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg)
+#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val)
#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val)
#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val)
#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val)
#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val)
-#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
+#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 49e5e99..6bdab89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6228,7 +6228,7 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->tvconf.has_component_output = false;
break;
case OUTPUT_LVDS:
- if ((conn & 0x00003f00) != 0x10)
+ if ((conn & 0x00003f00) >> 8 != 0x10)
entry->lvdsconf.use_straps_for_mode = true;
entry->lvdsconf.use_power_scripts = true;
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index a7fae26..a521840 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
DRM_ERROR("bo %p still attached to GEM object\n", bo);
nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
- nouveau_vm_put(&nvbo->vma);
+ if (nvbo->vma.node) {
+ nouveau_vm_unmap(&nvbo->vma);
+ nouveau_vm_put(&nvbo->vma);
+ }
kfree(nvbo);
}
@@ -128,6 +131,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
}
}
+ nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, flags, 0);
nvbo->channel = chan;
@@ -166,17 +170,17 @@ static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+ int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
if (dev_priv->card_type == NV_10 &&
- nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
+ nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
+ nvbo->bo.mem.num_pages < vram_pages / 2) {
/*
* Make sure that the color and depth buffers are handled
* by independent memory controller units. Up to a 9x
* speed up when alpha-blending and depth-test are enabled
* at the same time.
*/
- int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
-
if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
nvbo->placement.fpfn = vram_pages / 2;
nvbo->placement.lpfn = ~0;
@@ -785,7 +789,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
ttm_bo_mem_put(bo, &tmp_mem);
return ret;
@@ -811,11 +815,11 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
if (ret)
goto out;
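
The reworked set_placement_range() only splits VRAM between colour and depth buffers when the buffer itself fits in half of video memory; otherwise the restriction would make the allocation impossible. A toy version of the window computation, with invented sizes rather than real hardware values:

#include <stdio.h>

int main(void)
{
        unsigned long vram_pages = 262144;      /* hypothetical: 1 GiB of 4 KiB pages */
        unsigned long bo_pages = 2048;          /* hypothetical: 8 MiB buffer */
        int is_zeta = 1;                        /* depth/stencil buffer? */

        if (bo_pages < vram_pages / 2) {
                /* depth buffers go to the upper half, colour to the lower half */
                unsigned long fpfn = is_zeta ? vram_pages / 2 : 0;
                unsigned long lpfn = is_zeta ? ~0UL : vram_pages / 2;

                printf("restrict placement to pages [%lu, %lu)\n", fpfn, lpfn);
        } else {
                printf("buffer too large for the split, leave placement unrestricted\n");
        }
        return 0;
}
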
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index a21e000..390d82c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -507,6 +507,7 @@ nouveau_connector_native_mode(struct drm_connector *connector)
int high_w = 0, high_h = 0, high_v = 0;
list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
+ mode->vrefresh = drm_mode_vrefresh(mode);
if (helper->mode_valid(connector, mode) != MODE_OK ||
(mode->flags & DRM_MODE_FLAG_INTERLACE))
continue;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 65699bf..b368ed7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -83,7 +83,8 @@ nouveau_dma_init(struct nouveau_channel *chan)
return ret;
/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
- ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
+ ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
+ &chan->m2mf_ntfy);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 9821fca..982d70b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -852,7 +852,8 @@ extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
extern int nouveau_notifier_init_channel(struct nouveau_channel *);
extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
- int cout, uint32_t *offset);
+ int cout, uint32_t start, uint32_t end,
+ uint32_t *offset);
extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
struct drm_file *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 26347b7..b0fb9bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -725,8 +725,10 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
mem->page_alignment << PAGE_SHIFT, size_nc,
(nvbo->tile_flags >> 8) & 0xff, &node);
- if (ret)
- return ret;
+ if (ret) {
+ mem->mm_node = NULL;
+ return (ret == -ENOSPC) ? 0 : ret;
+ }
node->page_shift = 12;
if (nvbo->vma.node)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index 8844b50..7609756 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -123,7 +123,7 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
return 0;
}
- return -ENOMEM;
+ return -ENOSPC;
}
int
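
Together with the nouveau_mem.c hunk above, this separates "the heap is merely full" (-ENOSPC, which the VRAM manager converts into a NULL node and a zero return so TTM can evict and retry elsewhere) from genuine failures, which still abort the allocation. A self-contained sketch of that error translation, with illustrative names that are not the real driver functions:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct node { int dummy; };

/* stand-in for the low-level allocator: -ENOSPC means "merely full" */
static int heap_alloc(struct node **out, int heap_full)
{
        static struct node n;

        if (heap_full)
                return -ENOSPC;
        *out = &n;
        return 0;
}

/* stand-in for the VRAM manager: turn -ENOSPC into "no node, no error" */
static int manager_new(struct node **mm_node, int heap_full)
{
        int ret = heap_alloc(mm_node, heap_full);

        if (ret) {
                *mm_node = NULL;
                return (ret == -ENOSPC) ? 0 : ret;      /* caller may evict and retry */
        }
        return 0;
}

int main(void)
{
        struct node *n;
        int ret;

        ret = manager_new(&n, 1);
        printf("full heap  -> ret %d, node %p\n", ret, (void *)n);
        ret = manager_new(&n, 0);
        printf("empty heap -> ret %d, node %p\n", ret, (void *)n);
        return 0;
}
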
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index fe29d60..5ea1676 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -96,7 +96,8 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
int
nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
- int size, uint32_t *b_offset)
+ int size, uint32_t start, uint32_t end,
+ uint32_t *b_offset)
{
struct drm_device *dev = chan->dev;
struct nouveau_gpuobj *nobj = NULL;
@@ -104,9 +105,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
uint32_t offset;
int target, ret;
- mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0);
+ mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
+ start, end, 0);
if (mem)
- mem = drm_mm_get_block(mem, size, 0);
+ mem = drm_mm_get_block_range(mem, size, 0, start, end);
if (!mem) {
NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
return -ENOMEM;
@@ -177,7 +179,8 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
if (IS_ERR(chan))
return PTR_ERR(chan);
- ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
+ ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
+ &na->offset);
nouveau_channel_put(&chan);
return ret;
}
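
The notifier heap is now allocated by address range: the m2mf notifier is pinned to the top of the heap (0xfd0..0x1000) while ioctl allocations may search the whole range, so the two no longer collide. The toy first-fit allocator below illustrates range-restricted allocation over a byte array; it is a sketch of the idea, not the drm_mm implementation:

#include <stdio.h>

#define HEAP_SIZE 0x1000

static unsigned char used[HEAP_SIZE];

/* first-fit inside [start, end): returns an offset or -1 when full */
static int alloc_in_range(int size, int start, int end)
{
        for (int off = start; off + size <= end; off++) {
                int busy = 0;

                for (int i = 0; i < size; i++)
                        if (used[off + i])
                                busy = 1;
                if (busy)
                        continue;
                for (int i = 0; i < size; i++)
                        used[off + i] = 1;
                return off;
        }
        return -1;
}

int main(void)
{
        /* pin one block near the top, let the other search the whole heap */
        printf("m2mf notifier at  0x%03x\n", alloc_in_range(32, 0xfd0, 0x1000));
        printf("ioctl notifier at 0x%03x\n", alloc_in_range(32, 0x000, 0x1000));
        return 0;
}
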
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index f05c0cd..4399e2f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -543,7 +543,7 @@ nouveau_pm_resume(struct drm_device *dev)
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
struct nouveau_pm_level *perflvl;
- if (pm->cur == &pm->boot)
+ if (!pm->cur || pm->cur == &pm->boot)
return;
perflvl = pm->cur;
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index ef23550..c82db37 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -342,8 +342,8 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
if (nv_encoder->dcb->type == OUTPUT_LVDS) {
bool duallink, dummy;
- nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode->
- clock, &duallink, &dummy);
+ nouveau_bios_parse_lvds_table(dev, output_mode->clock,
+ &duallink, &dummy);
if (duallink)
regp->fp_control |= (8 << 28);
} else
@@ -518,8 +518,6 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
return;
if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
- struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
-
/* when removing an output, crtc may not be set, but PANEL_OFF
* must still be run
*/
@@ -527,12 +525,8 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
if (mode == DRM_MODE_DPMS_ON) {
- if (!nv_connector->native_mode) {
- NV_ERROR(dev, "Not turning on LVDS without native mode\n");
- return;
- }
call_lvds_script(dev, nv_encoder->dcb, head,
- LVDS_PANEL_ON, nv_connector->native_mode->clock);
+ LVDS_PANEL_ON, nv_encoder->mode.clock);
} else
/* pxclk of 0 is fine for PANEL_OFF, and for a
* disconnected LVDS encoder there is no native_mode
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 8870d72..18d30c2 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -211,18 +211,32 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
switch (dev_priv->chipset) {
+ case 0x40:
+ case 0x41: /* guess */
+ case 0x42:
+ case 0x43:
+ case 0x45: /* guess */
+ case 0x4e:
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
+ break;
case 0x44:
case 0x4a:
- case 0x4e:
nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
break;
-
case 0x46:
case 0x47:
case 0x49:
case 0x4b:
+ case 0x4c:
+ case 0x67:
+ default:
nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
@@ -230,15 +244,6 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
break;
-
- default:
- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
- nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
- break;
}
}
@@ -396,17 +401,20 @@ nv40_graph_init(struct drm_device *dev)
break;
default:
switch (dev_priv->chipset) {
- case 0x46:
- case 0x47:
- case 0x49:
- case 0x4b:
- nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
- break;
- default:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x45:
+ case 0x4e:
+ case 0x44:
+ case 0x4a:
nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
break;
+ default:
+ nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
+ break;
}
nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index ea00418..e57caa2 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -403,16 +403,24 @@ nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
void
nv50_instmem_flush(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ spin_lock(&dev_priv->ramin_lock);
nv_wr32(dev, 0x00330c, 0x00000001);
if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
+ spin_unlock(&dev_priv->ramin_lock);
}
void
nv84_instmem_flush(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ spin_lock(&dev_priv->ramin_lock);
nv_wr32(dev, 0x070000, 0x00000001);
if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
+ spin_unlock(&dev_priv->ramin_lock);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 459ff08..6144156 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -169,7 +169,11 @@ nv50_vm_flush(struct nouveau_vm *vm)
void
nv50_vm_flush_engine(struct drm_device *dev, int engine)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ spin_lock(&dev_priv->ramin_lock);
nv_wr32(dev, 0x100c80, (engine << 16) | 1);
if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
+ spin_unlock(&dev_priv->ramin_lock);
}
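
Both flush paths are a two-step sequence, a trigger write followed by a completion poll on the same register, so two concurrent flushes must not interleave; the new ramin_lock serializes them. A user-space sketch of the pattern, with a pthread mutex standing in for the spinlock and a plain variable for the MMIO register:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ramin_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile unsigned int flush_reg;         /* stands in for the MMIO register */

static void fake_hw_complete(void)
{
        flush_reg = 0;                          /* pretend the hardware finished */
}

static void vm_flush_engine(int engine)
{
        pthread_mutex_lock(&ramin_lock);
        flush_reg = (engine << 16) | 1;         /* trigger the flush */
        fake_hw_complete();
        while (flush_reg & 1)                   /* poll for completion */
                ;
        pthread_mutex_unlock(&ramin_lock);
}

int main(void)
{
        vm_flush_engine(5);
        printf("flush done\n");
        return 0;
}
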
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 095bc50..a4e5e53 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -557,9 +557,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
/* use recommended ref_div for ss */
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
if (ss_enabled) {
if (ss->refdiv) {
+ pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
pll->flags |= RADEON_PLL_USE_REF_DIV;
pll->reference_div = ss->refdiv;
if (ASIC_IS_AVIVO(rdev))
@@ -662,10 +662,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
index, (uint32_t *)&args);
adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
if (args.v3.sOutput.ucRefDiv) {
+ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
pll->flags |= RADEON_PLL_USE_REF_DIV;
pll->reference_div = args.v3.sOutput.ucRefDiv;
}
if (args.v3.sOutput.ucPostDiv) {
+ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
pll->flags |= RADEON_PLL_USE_POST_DIV;
pll->post_div = args.v3.sOutput.ucPostDiv;
}
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 56deae5..93fa735 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3490,7 +3490,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
track->num_texture = 16;
track->maxy = 4096;
track->separate_cube = 0;
- track->aaresolve = true;
+ track->aaresolve = false;
track->aa.robj = NULL;
}
@@ -3801,8 +3801,6 @@ static int r100_startup(struct radeon_device *rdev)
r100_mc_program(rdev);
/* Resume clock */
r100_clock_startup(rdev);
- /* Initialize GPU configuration (# pipes, ...) */
-// r100_gpu_init(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r100_enable_bm(rdev);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 768c60e..069efa8 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -910,6 +910,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_X16:
+ case R300_TX_FORMAT_FL_I16:
case R300_TX_FORMAT_Y8X8:
case R300_TX_FORMAT_Z5Y6X5:
case R300_TX_FORMAT_Z6Y5X5:
@@ -922,6 +923,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_Y16X16:
+ case R300_TX_FORMAT_FL_I16A16:
case R300_TX_FORMAT_Z11Y11X10:
case R300_TX_FORMAT_Z10Y11X11:
case R300_TX_FORMAT_W8Z8Y8X8:
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0e65709..3e7e7f9 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -971,7 +971,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
max_fractional_feed_div = pll->max_frac_feedback_div;
}
- for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
+ for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
uint32_t ref_div;
if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 66324b5..cc44bdf 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
u32 tiling_flags = 0;
int ret;
int aligned_size, size;
+ int height = mode_cmd->height;
/* need to align pitch with crtc limits */
mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
- size = mode_cmd->pitch * mode_cmd->height;
+ if (rdev->family >= CHIP_R600)
+ height = ALIGN(mode_cmd->height, 8);
+ size = mode_cmd->pitch * height;
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
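
On R600 and newer parts the scanout height is rounded up to a multiple of 8 before the size is computed, so the pinned object covers the whole aligned surface. A toy version of the arithmetic with arbitrary numbers:

#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned int pitch = 1920 * 4;          /* bytes per scanline, made up */
        unsigned int height = 1082;             /* not a multiple of 8 */
        unsigned int size = pitch * ALIGN_UP(height, 8);

        printf("aligned height %u, object size %u bytes\n",
               ALIGN_UP(height, 8), size);
        return 0;
}
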
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 773e484..297bc9a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -238,13 +238,13 @@ config SENSORS_K8TEMP
will be called k8temp.
config SENSORS_K10TEMP
- tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor"
+ tristate "AMD Family 10h/11h/12h/14h temperature sensor"
depends on X86 && PCI
help
If you say yes here you get support for the temperature
sensor(s) inside your CPU. Supported are later revisions of
- the AMD Family 10h and all revisions of the AMD Family 11h
- microarchitectures.
+ the AMD Family 10h and all revisions of the AMD Family 11h,
+ 12h (Llano), and 14h (Brazos) microarchitectures.
This driver can also be built as a module. If so, the module
will be called k10temp.
@@ -455,13 +455,14 @@ config SENSORS_JZ4740
called jz4740-hwmon.
config SENSORS_JC42
- tristate "JEDEC JC42.4 compliant temperature sensors"
+ tristate "JEDEC JC42.4 compliant memory module temperature sensors"
depends on I2C
help
- If you say yes here you get support for Jedec JC42.4 compliant
- temperature sensors. Support will include, but not be limited to,
- ADT7408, CAT34TS02,, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
- MCP9843, SE97, SE98, STTS424, TSE2002B3, and TS3000B3.
+ If you say yes here, you get support for JEDEC JC42.4 compliant
+ temperature sensors, which are used on many DDR3 memory modules for
+ mobile devices and servers. Support will include, but not be limited
+ to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
+ MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3.
This driver can also be built as a module. If so, the module
will be called jc42.
@@ -574,7 +575,7 @@ config SENSORS_LM85
help
If you say yes here you get support for National Semiconductor LM85
sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100,
- EMC6D101 and EMC6D102.
+ EMC6D101, EMC6D102, and EMC6D103.
This driver can also be built as a module. If so, the module
will be called lm85.
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 86d822a..d46c0c7 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -242,6 +242,7 @@ static const struct i2c_device_id ad7414_id[] = {
{ "ad7414", 0 },
{}
};
+MODULE_DEVICE_TABLE(i2c, ad7414_id);
static struct i2c_driver ad7414_driver = {
.driver = {
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index f13c843..5cc3e37 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -334,6 +334,7 @@ static const struct i2c_device_id adt7411_id[] = {
{ "adt7411", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, adt7411_id);
static struct i2c_driver adt7411_driver = {
.driver = {
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 340fc78..9349912 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -53,6 +53,8 @@ static const unsigned short normal_i2c[] = {
/* Configuration register defines */
#define JC42_CFG_CRIT_ONLY (1 << 2)
+#define JC42_CFG_TCRIT_LOCK (1 << 6)
+#define JC42_CFG_EVENT_LOCK (1 << 7)
#define JC42_CFG_SHUTDOWN (1 << 8)
#define JC42_CFG_HYST_SHIFT 9
#define JC42_CFG_HYST_MASK 0x03
@@ -332,7 +334,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
{
struct i2c_client *client = to_i2c_client(dev);
struct jc42_data *data = i2c_get_clientdata(client);
- long val;
+ unsigned long val;
int diff, hyst;
int err;
int ret = count;
@@ -380,14 +382,14 @@ static ssize_t show_alarm(struct device *dev,
static DEVICE_ATTR(temp1_input, S_IRUGO,
show_temp_input, NULL);
-static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_crit, S_IRUGO,
show_temp_crit, set_temp_crit);
-static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_min, S_IRUGO,
show_temp_min, set_temp_min);
-static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_max, S_IRUGO,
show_temp_max, set_temp_max);
-static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_crit_hyst, S_IRUGO,
show_temp_crit_hyst, set_temp_crit_hyst);
static DEVICE_ATTR(temp1_max_hyst, S_IRUGO,
show_temp_max_hyst, NULL);
@@ -412,8 +414,31 @@ static struct attribute *jc42_attributes[] = {
NULL
};
+static mode_t jc42_attribute_mode(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct jc42_data *data = i2c_get_clientdata(client);
+ unsigned int config = data->config;
+ bool readonly;
+
+ if (attr == &dev_attr_temp1_crit.attr)
+ readonly = config & JC42_CFG_TCRIT_LOCK;
+ else if (attr == &dev_attr_temp1_min.attr ||
+ attr == &dev_attr_temp1_max.attr)
+ readonly = config & JC42_CFG_EVENT_LOCK;
+ else if (attr == &dev_attr_temp1_crit_hyst.attr)
+ readonly = config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK);
+ else
+ readonly = true;
+
+ return S_IRUGO | (readonly ? 0 : S_IWUSR);
+}
+
static const struct attribute_group jc42_group = {
.attrs = jc42_attributes,
+ .is_visible = jc42_attribute_mode,
};
/* Return 0 if detection is successful, -ENODEV otherwise */
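
The new is_visible callback derives each limit file's permissions from the chip's lock bits: a locked register stays readable but loses its write bit. A stand-alone sketch of that mapping, reduced to the crit/event distinction and using stdio instead of sysfs:

#include <stdio.h>

#define CFG_TCRIT_LOCK  (1 << 6)
#define CFG_EVENT_LOCK  (1 << 7)

/* readable always; writable only while the relevant lock bit is clear */
static unsigned int attr_mode(unsigned int config, int is_crit_limit)
{
        unsigned int locked = is_crit_limit ? (config & CFG_TCRIT_LOCK)
                                            : (config & CFG_EVENT_LOCK);

        return 0444 | (locked ? 0 : 0200);
}

int main(void)
{
        printf("crit limit, unlocked: %o\n", attr_mode(0, 1));
        printf("crit limit, locked:   %o\n", attr_mode(CFG_TCRIT_LOCK, 1));
        return 0;
}
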
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index da5a240..82bf65a 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -1,5 +1,5 @@
/*
- * k10temp.c - AMD Family 10h/11h processor hardware monitoring
+ * k10temp.c - AMD Family 10h/11h/12h/14h processor hardware monitoring
*
* Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
*
@@ -25,7 +25,7 @@
#include <linux/pci.h>
#include <asm/processor.h>
-MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor");
+MODULE_DESCRIPTION("AMD Family 10h/11h/12h/14h CPU core temperature monitor");
MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
MODULE_LICENSE("GPL");
@@ -208,6 +208,7 @@ static void __devexit k10temp_remove(struct pci_dev *pdev)
static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 1e22984..d2cc286 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
enum chips {
any_chip, lm85b, lm85c,
adm1027, adt7463, adt7468,
- emc6d100, emc6d102
+ emc6d100, emc6d102, emc6d103
};
/* The LM85 registers */
@@ -90,6 +90,9 @@ enum chips {
#define LM85_VERSTEP_EMC6D100_A0 0x60
#define LM85_VERSTEP_EMC6D100_A1 0x61
#define LM85_VERSTEP_EMC6D102 0x65
+#define LM85_VERSTEP_EMC6D103_A0 0x68
+#define LM85_VERSTEP_EMC6D103_A1 0x69
+#define LM85_VERSTEP_EMC6D103S 0x6A /* Also known as EMC6D103:A2 */
#define LM85_REG_CONFIG 0x40
@@ -348,6 +351,7 @@ static const struct i2c_device_id lm85_id[] = {
{ "emc6d100", emc6d100 },
{ "emc6d101", emc6d100 },
{ "emc6d102", emc6d102 },
+ { "emc6d103", emc6d103 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm85_id);
@@ -1250,6 +1254,20 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
case LM85_VERSTEP_EMC6D102:
type_name = "emc6d102";
break;
+ case LM85_VERSTEP_EMC6D103_A0:
+ case LM85_VERSTEP_EMC6D103_A1:
+ type_name = "emc6d103";
+ break;
+ /*
+ * Registers apparently missing in EMC6D103S/EMC6D103:A2
+ * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102
+ * (according to the data sheets), but used unconditionally
+ * in the driver: 62[5:7], 6D[0:7], and 6E[0:7].
+ * So skip EMC6D103S for now.
+ case LM85_VERSTEP_EMC6D103S:
+ type_name = "emc6d103s";
+ break;
+ */
}
} else {
dev_dbg(&adapter->dev,
@@ -1283,6 +1301,7 @@ static int lm85_probe(struct i2c_client *client,
case adt7468:
case emc6d100:
case emc6d102:
+ case emc6d103:
data->freq_map = adm1027_freq_map;
break;
default:
@@ -1468,7 +1487,7 @@ static struct lm85_data *lm85_update_device(struct device *dev)
/* More alarm bits */
data->alarms |= lm85_read_value(client,
EMC6D100_REG_ALARM3) << 16;
- } else if (data->type == emc6d102) {
+ } else if (data->type == emc6d102 || data->type == emc6d103) {
/* Have to read LSB bits after the MSB ones because
the reading of the MSB bits has frozen the
LSBs (backward from the ADM1027).
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index b605ff3..829a2a1 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -847,11 +847,15 @@ complete:
dev_err(dev->dev, "Arbitration lost\n");
err |= OMAP_I2C_STAT_AL;
}
+ /*
+ * ProDB0017052: Clear ARDY bit twice
+ */
if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
OMAP_I2C_STAT_AL)) {
omap_i2c_ack_stat(dev, stat &
(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
- OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
+ OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR |
+ OMAP_I2C_STAT_ARDY));
omap_i2c_complete_cmd(dev, err);
return IRQ_HANDLED;
}
@@ -1137,12 +1141,41 @@ omap_i2c_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_SUSPEND
+static int omap_i2c_suspend(struct device *dev)
+{
+ if (!pm_runtime_suspended(dev))
+ if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
+ dev->bus->pm->runtime_suspend(dev);
+
+ return 0;
+}
+
+static int omap_i2c_resume(struct device *dev)
+{
+ if (!pm_runtime_suspended(dev))
+ if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
+ dev->bus->pm->runtime_resume(dev);
+
+ return 0;
+}
+
+static struct dev_pm_ops omap_i2c_pm_ops = {
+ .suspend = omap_i2c_suspend,
+ .resume = omap_i2c_resume,
+};
+#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
+#else
+#define OMAP_I2C_PM_OPS NULL
+#endif
+
static struct platform_driver omap_i2c_driver = {
.probe = omap_i2c_probe,
.remove = omap_i2c_remove,
.driver = {
.name = "omap_i2c",
.owner = THIS_MODULE,
+ .pm = OMAP_I2C_PM_OPS,
},
};
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 495be45..266135d 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -942,7 +942,7 @@ stu300_probe(struct platform_device *pdev)
adap->owner = THIS_MODULE;
/* DDC class but actually often used for more generic I2C */
adap->class = I2C_CLASS_DDC;
- strncpy(adap->name, "ST Microelectronics DDC I2C adapter",
+ strlcpy(adap->name, "ST Microelectronics DDC I2C adapter",
sizeof(adap->name));
adap->nr = bus_nr;
adap->algo = &stu300_algo;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 1fa091e..4a5c4a4 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -62,6 +62,7 @@
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <asm/mwait.h>
+#include <asm/msr.h>
#define INTEL_IDLE_VERSION "0.4"
#define PREFIX "intel_idle: "
@@ -85,6 +86,12 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
static struct cpuidle_state *cpuidle_state_table;
/*
+ * Hardware C-state auto-demotion may not always be optimal.
+ * Indicate which enable bits to clear here.
+ */
+static unsigned long long auto_demotion_disable_flags;
+
+/*
* Set this flag for states where the HW flushes the TLB for us
* and so we don't need cross-calls to keep it consistent.
* If this flag is set, SW flushes the TLB, so even if the
@@ -281,6 +288,15 @@ static struct notifier_block setup_broadcast_notifier = {
.notifier_call = setup_broadcast_cpuhp_notify,
};
+static void auto_demotion_disable(void *dummy)
+{
+ unsigned long long msr_bits;
+
+ rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+ msr_bits &= ~auto_demotion_disable_flags;
+ wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+}
+
/*
* intel_idle_probe()
*/
@@ -324,11 +340,17 @@ static int intel_idle_probe(void)
case 0x25: /* Westmere */
case 0x2C: /* Westmere */
cpuidle_state_table = nehalem_cstates;
+ auto_demotion_disable_flags =
+ (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE);
break;
case 0x1C: /* 28 - Atom Processor */
+ cpuidle_state_table = atom_cstates;
+ break;
+
case 0x26: /* 38 - Lincroft Atom Processor */
cpuidle_state_table = atom_cstates;
+ auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE;
break;
case 0x2A: /* SNB */
@@ -436,6 +458,8 @@ static int intel_idle_cpuidle_devices_init(void)
return -EIO;
}
}
+ if (auto_demotion_disable_flags)
+ smp_call_function(auto_demotion_disable, NULL, 1);
return 0;
}
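
Disabling auto-demotion is a read-modify-write of one MSR on every CPU: read the package C-state control register, clear the demotion-enable bits, write it back. The sketch below models only the bit manipulation with an ordinary variable; the bit positions and register value are illustrative, not taken from a datasheet:

#include <stdio.h>

/* illustrative bit positions only */
#define C1_AUTO_DEMOTE  (1ULL << 26)
#define C3_AUTO_DEMOTE  (1ULL << 25)

static unsigned long long fake_msr = 0x8407ULL | C1_AUTO_DEMOTE | C3_AUTO_DEMOTE;

int main(void)
{
        unsigned long long disable = C1_AUTO_DEMOTE | C3_AUTO_DEMOTE;
        unsigned long long val;

        val = fake_msr;                         /* rdmsrl() */
        val &= ~disable;                        /* clear the auto-demotion enables */
        fake_msr = val;                         /* wrmsrl() */

        printf("register is now %#llx\n", fake_msr);
        return 0;
}
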
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 8b606fd..08c1948 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2610,9 +2610,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
netif_carrier_on(nesvnic->netdev);
spin_lock(&nesvnic->port_ibevent_lock);
- if (nesdev->iw_status == 0) {
- nesdev->iw_status = 1;
- nes_port_ibevent(nesvnic);
+ if (nesvnic->of_device_registered) {
+ if (nesdev->iw_status == 0) {
+ nesdev->iw_status = 1;
+ nes_port_ibevent(nesvnic);
+ }
}
spin_unlock(&nesvnic->port_ibevent_lock);
}
@@ -2642,9 +2644,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
netif_carrier_off(nesvnic->netdev);
spin_lock(&nesvnic->port_ibevent_lock);
- if (nesdev->iw_status == 1) {
- nesdev->iw_status = 0;
- nes_port_ibevent(nesvnic);
+ if (nesvnic->of_device_registered) {
+ if (nesdev->iw_status == 1) {
+ nesdev->iw_status = 0;
+ nes_port_ibevent(nesvnic);
+ }
}
spin_unlock(&nesvnic->port_ibevent_lock);
}
@@ -2703,9 +2707,11 @@ void nes_recheck_link_status(struct work_struct *work)
netif_carrier_on(nesvnic->netdev);
spin_lock(&nesvnic->port_ibevent_lock);
- if (nesdev->iw_status == 0) {
- nesdev->iw_status = 1;
- nes_port_ibevent(nesvnic);
+ if (nesvnic->of_device_registered) {
+ if (nesdev->iw_status == 0) {
+ nesdev->iw_status = 1;
+ nes_port_ibevent(nesvnic);
+ }
}
spin_unlock(&nesvnic->port_ibevent_lock);
}
@@ -2723,9 +2729,11 @@ void nes_recheck_link_status(struct work_struct *work)
netif_carrier_off(nesvnic->netdev);
spin_lock(&nesvnic->port_ibevent_lock);
- if (nesdev->iw_status == 1) {
- nesdev->iw_status = 0;
- nes_port_ibevent(nesvnic);
+ if (nesvnic->of_device_registered) {
+ if (nesdev->iw_status == 1) {
+ nesdev->iw_status = 0;
+ nes_port_ibevent(nesvnic);
+ }
}
spin_unlock(&nesvnic->port_ibevent_lock);
}
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 8245237..eca0c41 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1005,7 +1005,8 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
* there are still requests that haven't been acked.
*/
if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
- !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)))
+ !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
+ (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
start_timer(qp);
while (qp->s_last != qp->s_acked) {
@@ -1439,6 +1440,8 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
}
spin_lock_irqsave(&qp->s_lock, flags);
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto ack_done;
/* Ignore invalid responses. */
if (qib_cmp24(psn, qp->s_next_psn) >= 0)
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 23cf8fc..5b8f59d 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -360,7 +360,7 @@ static int gameport_queue_event(void *object, struct module *owner,
event->owner = owner;
list_add_tail(&event->node, &gameport_event_list);
- schedule_work(&gameport_event_work);
+ queue_work(system_long_wq, &gameport_event_work);
out:
spin_unlock_irqrestore(&gameport_event_lock, flags);
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index ac471b7..99ce903 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -71,8 +71,9 @@ struct tegra_kbc {
spinlock_t lock;
unsigned int repoll_dly;
unsigned long cp_dly_jiffies;
+ bool use_fn_map;
const struct tegra_kbc_platform_data *pdata;
- unsigned short keycode[KBC_MAX_KEY];
+ unsigned short keycode[KBC_MAX_KEY * 2];
unsigned short current_keys[KBC_MAX_KPENT];
unsigned int num_pressed_keys;
struct timer_list timer;
@@ -178,6 +179,40 @@ static const u32 tegra_kbc_default_keymap[] = {
KEY(15, 5, KEY_F2),
KEY(15, 6, KEY_CAPSLOCK),
KEY(15, 7, KEY_F6),
+
+ /* Software Handled Function Keys */
+ KEY(20, 0, KEY_KP7),
+
+ KEY(21, 0, KEY_KP9),
+ KEY(21, 1, KEY_KP8),
+ KEY(21, 2, KEY_KP4),
+ KEY(21, 4, KEY_KP1),
+
+ KEY(22, 1, KEY_KPSLASH),
+ KEY(22, 2, KEY_KP6),
+ KEY(22, 3, KEY_KP5),
+ KEY(22, 4, KEY_KP3),
+ KEY(22, 5, KEY_KP2),
+ KEY(22, 7, KEY_KP0),
+
+ KEY(27, 1, KEY_KPASTERISK),
+ KEY(27, 3, KEY_KPMINUS),
+ KEY(27, 4, KEY_KPPLUS),
+ KEY(27, 5, KEY_KPDOT),
+
+ KEY(28, 5, KEY_VOLUMEUP),
+
+ KEY(29, 3, KEY_HOME),
+ KEY(29, 4, KEY_END),
+ KEY(29, 5, KEY_BRIGHTNESSDOWN),
+ KEY(29, 6, KEY_VOLUMEDOWN),
+ KEY(29, 7, KEY_BRIGHTNESSUP),
+
+ KEY(30, 0, KEY_NUMLOCK),
+ KEY(30, 1, KEY_SCROLLLOCK),
+ KEY(30, 2, KEY_MUTE),
+
+ KEY(31, 4, KEY_HELP),
};
static const struct matrix_keymap_data tegra_kbc_default_keymap_data = {
@@ -224,6 +259,7 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
unsigned int i;
unsigned int num_down = 0;
unsigned long flags;
+ bool fn_keypress = false;
spin_lock_irqsave(&kbc->lock, flags);
for (i = 0; i < KBC_MAX_KPENT; i++) {
@@ -237,11 +273,28 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT);
scancodes[num_down] = scancode;
- keycodes[num_down++] = kbc->keycode[scancode];
+ keycodes[num_down] = kbc->keycode[scancode];
+ /* If driver uses Fn map, do not report the Fn key. */
+ if ((keycodes[num_down] == KEY_FN) && kbc->use_fn_map)
+ fn_keypress = true;
+ else
+ num_down++;
}
val >>= 8;
}
+
+ /*
+ * If the platform uses Fn keymaps, translate keys on a Fn keypress.
+ * Function keycodes are KBC_MAX_KEY apart from the plain keycodes.
+ */
+ if (fn_keypress) {
+ for (i = 0; i < num_down; i++) {
+ scancodes[i] += KBC_MAX_KEY;
+ keycodes[i] = kbc->keycode[scancodes[i]];
+ }
+ }
+
spin_unlock_irqrestore(&kbc->lock, flags);
tegra_kbc_report_released_keys(kbc->idev,
@@ -594,8 +647,11 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
input_dev->keycode = kbc->keycode;
input_dev->keycodesize = sizeof(kbc->keycode[0]);
- input_dev->keycodemax = ARRAY_SIZE(kbc->keycode);
+ input_dev->keycodemax = KBC_MAX_KEY;
+ if (pdata->use_fn_map)
+ input_dev->keycodemax *= 2;
+ kbc->use_fn_map = pdata->use_fn_map;
keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
input_dev->keycode, input_dev->keybit);
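
The keycode table now has two halves of KBC_MAX_KEY entries each, the plain map and the Fn map. When Fn is held (and the platform enables use_fn_map), every reported scancode is bumped by KBC_MAX_KEY so it indexes the second half, and the Fn key itself is swallowed. A toy model of that lookup with a four-entry table of made-up labels:

#include <stdio.h>

#define KBC_MAX_KEY 4                           /* tiny table; the real value is larger */

static const char *keycode[KBC_MAX_KEY * 2] = {
        "A", "B", "C", "D",                     /* plain map */
        "1", "2", "3", "4",                     /* Fn map */
};

int main(void)
{
        int pressed[] = { 1, 3 };               /* scancodes seen this scan */
        int fn_down = 1;                        /* Fn detected, so it is not reported */

        for (unsigned int i = 0; i < sizeof(pressed) / sizeof(pressed[0]); i++) {
                int scancode = pressed[i] + (fn_down ? KBC_MAX_KEY : 0);

                printf("scancode %d reports \"%s\"\n", scancode, keycode[scancode]);
        }
        return 0;
}
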
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 25e5d04..7453938 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -51,6 +51,29 @@
#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20)
#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12)
#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16)
+
+/*
+ * The following describes response for the 0x0c query.
+ *
+ * byte mask name meaning
+ * ---- ---- ------- ------------
+ * 1 0x01 adjustable threshold capacitive button sensitivity
+ * can be adjusted
+ * 1 0x02 report max query 0x0d gives max coord reported
+ * 1 0x04 clearpad sensor is ClearPad product
+ * 1 0x08 advanced gesture not particularly meaningful
+ * 1 0x10 clickpad bit 0 1-button ClickPad
+ * 1 0x60 multifinger mode identifies firmware finger counting
+ * (not reporting!) algorithm.
+ * Not particularly meaningful
+ * 1 0x80 covered pad W clipped to 14, 15 == pad mostly covered
+ * 2 0x01 clickpad bit 1 2-button ClickPad
+ * 2 0x02 deluxe LED controls touchpad support LED commands
+ * ala multimedia control bar
+ * 2 0x04 reduced filtering firmware does less filtering on
+ * position data, driver should watch
+ * for noise.
+ */
#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
#define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000)
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 7c38d1f..ba70058 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -299,7 +299,7 @@ static int serio_queue_event(void *object, struct module *owner,
event->owner = owner;
list_add_tail(&event->node, &serio_event_list);
- schedule_work(&serio_event_work);
+ queue_work(system_long_wq, &serio_event_work);
out:
spin_unlock_irqrestore(&serio_event_lock, flags);
diff --git a/drivers/isdn/hardware/eicon/istream.c b/drivers/isdn/hardware/eicon/istream.c
index 18f8798..7bd5baa 100644
--- a/drivers/isdn/hardware/eicon/istream.c
+++ b/drivers/isdn/hardware/eicon/istream.c
@@ -62,7 +62,7 @@ void diva_xdi_provide_istream_info (ADAPTER* a,
stream interface.
If synchronous service was requested, then function
does return amount of data written to stream.
- 'final' does indicate that pice of data to be written is
+ 'final' does indicate that piece of data to be written is
final part of frame (necessary only by structured datatransfer)
return 0 if zero lengh packet was written
return -1 if stream is full
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c
index 0858791..cfff0c4 100644
--- a/drivers/isdn/hisax/isdnl2.c
+++ b/drivers/isdn/hisax/isdnl2.c
@@ -1247,10 +1247,10 @@ static void
l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
- struct sk_buff *skb, *oskb;
+ struct sk_buff *skb;
struct Layer2 *l2 = &st->l2;
u_char header[MAX_HEADER_LEN];
- int i;
+ int i, hdr_space_needed;
int unsigned p1;
u_long flags;
@@ -1261,6 +1261,16 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
if (!skb)
return;
+ hdr_space_needed = l2headersize(l2, 0);
+ if (hdr_space_needed > skb_headroom(skb)) {
+ struct sk_buff *orig_skb = skb;
+
+ skb = skb_realloc_headroom(skb, hdr_space_needed);
+ if (!skb) {
+ dev_kfree_skb(orig_skb);
+ return;
+ }
+ }
spin_lock_irqsave(&l2->lock, flags);
if(test_bit(FLG_MOD128, &l2->flag))
p1 = (l2->vs - l2->va) % 128;
@@ -1285,19 +1295,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
l2->vs = (l2->vs + 1) % 8;
}
spin_unlock_irqrestore(&l2->lock, flags);
- p1 = skb->data - skb->head;
- if (p1 >= i)
- memcpy(skb_push(skb, i), header, i);
- else {
- printk(KERN_WARNING
- "isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
- oskb = skb;
- skb = alloc_skb(oskb->len + i, GFP_ATOMIC);
- memcpy(skb_put(skb, i), header, i);
- skb_copy_from_linear_data(oskb,
- skb_put(skb, oskb->len), oskb->len);
- dev_kfree_skb(oskb);
- }
+ memcpy(skb_push(skb, i), header, i);
st->l2.l2l1(st, PH_PULL | INDICATION, skb);
test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
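
Instead of detecting a too-short header area after the fact, the queue pull now reserves headroom up front: if the buffer lacks room for the layer-2 header it is reallocated once with skb_realloc_headroom(), and the header is then always prepended with a plain skb_push(). A stand-alone model of that check over an ordinary byte buffer, not an sk_buff:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
        unsigned char *head;                    /* start of the allocation */
        unsigned char *data;                    /* start of the payload */
        size_t len;
};

/* copy the payload into a new buffer that reserves 'need' bytes of headroom */
static struct buf *realloc_headroom(const struct buf *b, size_t need)
{
        struct buf *nb = malloc(sizeof(*nb));

        if (!nb)
                return NULL;
        nb->head = malloc(need + b->len);
        if (!nb->head) {
                free(nb);
                return NULL;
        }
        nb->data = nb->head + need;
        memcpy(nb->data, b->data, b->len);
        nb->len = b->len;
        return nb;
}

int main(void)
{
        unsigned char payload[] = "payload";
        struct buf b = { payload, payload, sizeof(payload) };  /* no headroom at all */
        size_t hdr = 4;
        struct buf *use = &b;

        if ((size_t)(b.data - b.head) < hdr)
                use = realloc_headroom(&b, hdr);
        if (!use)
                return 1;

        use->data -= hdr;                       /* the "skb_push" */
        memcpy(use->data, "HDR!", hdr);
        use->len += hdr;
        printf("frame is now %zu bytes\n", use->len);
        return 0;
}
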
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 8a2f767..0ed7f6b 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
- mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = linear_conf(mddev, mddev->raid_disks);
if (!conf)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0cc30ec..818313e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -553,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit)
{
mddev_t *mddev, *new = NULL;
+ if (unit && MAJOR(unit) != MD_MAJOR)
+ unit &= ~((1<<MdpMinorShift)-1);
+
retry:
spin_lock(&all_mddevs_lock);
@@ -4138,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
}
mddev->array_sectors = sectors;
- set_capacity(mddev->gendisk, mddev->array_sectors);
- if (mddev->pers)
+ if (mddev->pers) {
+ set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
-
+ }
return len;
}
@@ -4624,6 +4627,7 @@ static int do_md_run(mddev_t *mddev)
}
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
+ mddev->changed = 1;
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
out:
return err;
@@ -4712,6 +4716,7 @@ static void md_clean(mddev_t *mddev)
mddev->sync_speed_min = mddev->sync_speed_max = 0;
mddev->recovery = 0;
mddev->in_sync = 0;
+ mddev->changed = 0;
mddev->degraded = 0;
mddev->safemode = 0;
mddev->bitmap_info.offset = 0;
@@ -4827,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
set_capacity(disk, 0);
mutex_unlock(&mddev->open_mutex);
+ mddev->changed = 1;
revalidate_disk(disk);
if (mddev->ro)
@@ -6011,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
atomic_inc(&mddev->openers);
mutex_unlock(&mddev->open_mutex);
- check_disk_size_change(mddev->gendisk, bdev);
+ check_disk_change(bdev);
out:
return err;
}
@@ -6026,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode)
return 0;
}
+
+static int md_media_changed(struct gendisk *disk)
+{
+ mddev_t *mddev = disk->private_data;
+
+ return mddev->changed;
+}
+
+static int md_revalidate(struct gendisk *disk)
+{
+ mddev_t *mddev = disk->private_data;
+
+ mddev->changed = 0;
+ return 0;
+}
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
@@ -6036,6 +6057,8 @@ static const struct block_device_operations md_fops =
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
+ .media_changed = md_media_changed,
+ .revalidate_disk= md_revalidate,
};
static int md_thread(void * arg)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7e90b85..12215d4 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -274,6 +274,8 @@ struct mddev_s
atomic_t active; /* general refcount */
atomic_t openers; /* number of active opens */
+ int changed; /* True if we might need to
+ * reread partition info */
int degraded; /* whether md should consider
* adding a spare
*/
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 6d7ddf3..3a62d44 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev)
* bookkeeping area. [whatever we allocate in multipath_run(),
* should be freed in multipath_stop()]
*/
- mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
mddev->private = conf;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 637a968..c0ac457 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -361,7 +361,6 @@ static int raid0_run(mddev_t *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
- mddev->queue->queue_lock = &mddev->queue->__queue_lock;
/* if private is not null, we are here after takeover */
if (mddev->private == NULL) {
@@ -670,6 +669,7 @@ static void *raid0_takeover_raid1(mddev_t *mddev)
mddev->new_layout = 0;
mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
mddev->delta_disks = 1 - mddev->raid_disks;
+ mddev->raid_disks = 1;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a23ffa3..06cd712 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf)
if (conf->pending_bio_list.head) {
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
+ /* Only take the spinlock to quiet a warning */
+ spin_lock(conf->mddev->queue->queue_lock);
blk_remove_plug(conf->mddev->queue);
+ spin_unlock(conf->mddev->queue->queue_lock);
spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to
* disk before proceeding w/ I/O */
@@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r1_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- blk_plug_device(mddev->queue);
+ blk_plug_device_unlocked(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -2021,7 +2024,6 @@ static int run(mddev_t *mddev)
if (IS_ERR(conf))
return PTR_ERR(conf);
- mddev->queue->queue_lock = &conf->device_lock;
list_for_each_entry(rdev, &mddev->disks, same_set) {
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3b607b2..747d061 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf)
if (conf->pending_bio_list.head) {
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
+ /* Spinlock only taken to quiet a warning */
+ spin_lock(conf->mddev->queue->queue_lock);
blk_remove_plug(conf->mddev->queue);
+ spin_unlock(conf->mddev->queue->queue_lock);
spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to disk
* before proceeding w/ I/O */
@@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r10_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- blk_plug_device(mddev->queue);
+ blk_plug_device_unlocked(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
@@ -2304,8 +2307,6 @@ static int run(mddev_t *mddev)
if (!conf)
goto out;
- mddev->queue->queue_lock = &conf->device_lock;
-
mddev->thread = conf->thread;
conf->thread = NULL;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7028128..78536fd 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5204,7 +5204,6 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = raid5_congested;
- mddev->queue->queue_lock = &conf->device_lock;
mddev->queue->unplug_fn = raid5_unplug_queue;
chunk_size = mddev->chunk_sectors << 9;
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index e9a3eab..8c1d85e 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -621,7 +621,7 @@ static int __init memstick_init(void)
{
int rc;
- workqueue = create_freezeable_workqueue("kmemstick");
+ workqueue = create_freezable_workqueue("kmemstick");
if (!workqueue)
return -ENOMEM;
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index f71f229..1735c84 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
#endif
-#define MPT_LINUX_VERSION_COMMON "3.04.17"
-#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.17"
+#define MPT_LINUX_VERSION_COMMON "3.04.18"
+#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.18"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index a3856ed..e8deb8e 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -597,6 +597,13 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
}
static int
+mptctl_release(struct inode *inode, struct file *filep)
+{
+ fasync_helper(-1, filep, 0, &async_queue);
+ return 0;
+}
+
+static int
mptctl_fasync(int fd, struct file *filep, int mode)
{
MPT_ADAPTER *ioc;
@@ -2815,6 +2822,7 @@ static const struct file_operations mptctl_fops = {
.llseek = no_llseek,
.fasync = mptctl_fasync,
.unlocked_ioctl = mptctl_ioctl,
+ .release = mptctl_release,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_mpctl_ioctl,
#endif
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 59b8f53..0d9b82a 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1873,8 +1873,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
}
out:
- printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
- ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt);
+ printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n",
+ ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval,
+ SCpnt, SCpnt->serial_number);
return retval;
}
@@ -1911,7 +1912,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
vdevice = SCpnt->device->hostdata;
if (!vdevice || !vdevice->vtarget) {
- retval = SUCCESS;
+ retval = 0;
goto out;
}
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 6a1f940..c45e630 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -143,9 +143,9 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
unsigned long flags;
struct asic3 *asic;
- desc->chip->ack(irq);
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
- asic = desc->handler_data;
+ asic = get_irq_data(irq);
for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
u32 status;
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index 33c923d..fdd8a1b 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -118,12 +118,12 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
/* Voice codec interface client */
cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
- cell->name = "davinci_vcif";
+ cell->name = "davinci-vcif";
cell->driver_data = davinci_vc;
/* Voice codec CQ93VC client */
cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
- cell->name = "cq93vc";
+ cell->name = "cq93vc-codec";
cell->driver_data = davinci_vc;
ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 627cf57..e9018d1 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -150,12 +150,12 @@ static inline int __tps6586x_write(struct i2c_client *client,
static inline int __tps6586x_writes(struct i2c_client *client, int reg,
int len, uint8_t *val)
{
- int ret;
+ int ret, i;
- ret = i2c_smbus_write_i2c_block_data(client, reg, len, val);
- if (ret < 0) {
- dev_err(&client->dev, "failed writings to 0x%02x\n", reg);
- return ret;
+ for (i = 0; i < len; i++) {
+ ret = __tps6586x_write(client, reg + i, *(val + i));
+ if (ret < 0)
+ return ret;
}
return 0;
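
The bulk register write is replaced by a loop of single-register writes through __tps6586x_write(), stopping at the first failure, which avoids the SMBus block-write path the change drops. A sketch of the same loop with a stand-in accessor that just prints:

#include <stdio.h>

/* stand-in for the real single-register accessor */
static int write_reg(int reg, unsigned char val)
{
        printf("write reg 0x%02x = 0x%02x\n", reg, val);
        return 0;                               /* pretend it always succeeds */
}

/* emulate a block write with consecutive single-register writes */
static int write_block(int reg, int len, const unsigned char *val)
{
        for (int i = 0; i < len; i++) {
                int ret = write_reg(reg + i, val[i]);

                if (ret < 0)
                        return ret;             /* stop at the first failure */
        }
        return 0;
}

int main(void)
{
        const unsigned char data[3] = { 0x11, 0x22, 0x33 };

        return write_block(0x40, 3, data);
}
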
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 000cb41..92b85e2 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -385,12 +385,18 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
idev->close = ucb1x00_ts_close;
__set_bit(EV_ABS, idev->evbit);
- __set_bit(ABS_X, idev->absbit);
- __set_bit(ABS_Y, idev->absbit);
- __set_bit(ABS_PRESSURE, idev->absbit);
input_set_drvdata(idev, ts);
+ ucb1x00_adc_enable(ts->ucb);
+ ts->x_res = ucb1x00_ts_read_xres(ts);
+ ts->y_res = ucb1x00_ts_read_yres(ts);
+ ucb1x00_adc_disable(ts->ucb);
+
+ input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0);
+ input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0);
+ input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0);
+
err = input_register_device(idev);
if (err)
goto fail;
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 41233c7..f4016a0 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -246,6 +246,16 @@ static int wm8994_suspend(struct device *dev)
struct wm8994 *wm8994 = dev_get_drvdata(dev);
int ret;
+ /* Don't actually go through with the suspend if the CODEC is
+	 * still active (eg, for audio passthrough from the CP). */
+ ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_1);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read power status: %d\n", ret);
+ } else if (ret & WM8994_VMID_SEL_MASK) {
+ dev_dbg(dev, "CODEC still active, ignoring suspend\n");
+ return 0;
+ }
+
/* GPIO configuration state is saved here since we may be configuring
* the GPIO alternate functions even if we're not using the gpiolib
* driver for them.
@@ -261,6 +271,8 @@ static int wm8994_suspend(struct device *dev)
if (ret < 0)
dev_err(dev, "Failed to save LDO registers: %d\n", ret);
+ wm8994->suspended = true;
+
ret = regulator_bulk_disable(wm8994->num_supplies,
wm8994->supplies);
if (ret != 0) {
@@ -276,6 +288,10 @@ static int wm8994_resume(struct device *dev)
struct wm8994 *wm8994 = dev_get_drvdata(dev);
int ret;
+ /* We may have lied to the PM core about suspending */
+ if (!wm8994->suspended)
+ return 0;
+
ret = regulator_bulk_enable(wm8994->num_supplies,
wm8994->supplies);
if (ret != 0) {
@@ -298,6 +314,8 @@ static int wm8994_resume(struct device *dev)
if (ret < 0)
dev_err(dev, "Failed to restore GPIO registers: %d\n", ret);
+ wm8994->suspended = false;
+
return 0;
}
#endif
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index 63ee4c1..b6e1c9a 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -449,6 +449,7 @@ static const struct i2c_device_id bmp085_id[] = {
{ "bmp085", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, bmp085_id);
static struct i2c_driver bmp085_driver = {
.driver = {
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index 5f6852d..44d4475 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -329,7 +329,7 @@ static int __init tifm_init(void)
{
int rc;
- workqueue = create_freezeable_workqueue("tifm");
+ workqueue = create_freezable_workqueue("tifm");
if (!workqueue)
return -ENOMEM;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 4d2ea8e..6df5a55 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -785,7 +785,7 @@ static int __init vmballoon_init(void)
if (x86_hyper != &x86_hyper_vmware)
return -ENODEV;
- vmballoon_wq = create_freezeable_workqueue("vmmemctl");
+ vmballoon_wq = create_freezable_workqueue("vmmemctl");
if (!vmballoon_wq) {
pr_err("failed to create workqueue\n");
return -ENOMEM;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 5c4a54d..ebc62ad 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -792,7 +792,6 @@ int mmc_attach_sdio(struct mmc_host *host)
*/
mmc_release_host(host);
err = mmc_add_card(host->card);
- mmc_claim_host(host);
if (err)
goto remove_added;
@@ -805,12 +804,12 @@ int mmc_attach_sdio(struct mmc_host *host)
goto remove_added;
}
+ mmc_claim_host(host);
return 0;
remove_added:
/* Remove without lock if the device has been added. */
- mmc_release_host(host);
mmc_sdio_remove(host);
mmc_claim_host(host);
remove:
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 67f17d6..5bbb87d 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -2,7 +2,7 @@
* linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
*
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- * Copyright (C) 2010 ST-Ericsson AB.
+ * Copyright (C) 2010 ST-Ericsson SA
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -25,8 +25,10 @@
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
-#include <linux/amba/mmci.h>
#include <linux/regulator/consumer.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/amba/mmci.h>
#include <asm/div64.h>
#include <asm/io.h>
@@ -142,9 +144,6 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
host->mrq = NULL;
host->cmd = NULL;
- if (mrq->data)
- mrq->data->bytes_xfered = host->data_xfered;
-
/*
* Need to drop the host lock here; mmc_request_done may call
* back into the driver...
@@ -189,6 +188,248 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
+/*
+ * All the DMA operation mode stuff goes inside this ifdef.
+ * This assumes that you have a generic DMA device interface,
+ * no custom DMA interfaces are supported.
+ */
+#ifdef CONFIG_DMA_ENGINE
+static void __devinit mmci_dma_setup(struct mmci_host *host)
+{
+ struct mmci_platform_data *plat = host->plat;
+ const char *rxname, *txname;
+ dma_cap_mask_t mask;
+
+ if (!plat || !plat->dma_filter) {
+ dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
+ return;
+ }
+
+ /* Try to acquire a generic DMA engine slave channel */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /*
+ * If only an RX channel is specified, the driver will
+ * attempt to use it bidirectionally, however if it is
+	 * specified but cannot be located, DMA will be disabled.
+ */
+ if (plat->dma_rx_param) {
+ host->dma_rx_channel = dma_request_channel(mask,
+ plat->dma_filter,
+ plat->dma_rx_param);
+ /* E.g if no DMA hardware is present */
+ if (!host->dma_rx_channel)
+ dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
+ }
+
+ if (plat->dma_tx_param) {
+ host->dma_tx_channel = dma_request_channel(mask,
+ plat->dma_filter,
+ plat->dma_tx_param);
+ if (!host->dma_tx_channel)
+ dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
+ } else {
+ host->dma_tx_channel = host->dma_rx_channel;
+ }
+
+ if (host->dma_rx_channel)
+ rxname = dma_chan_name(host->dma_rx_channel);
+ else
+ rxname = "none";
+
+ if (host->dma_tx_channel)
+ txname = dma_chan_name(host->dma_tx_channel);
+ else
+ txname = "none";
+
+ dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
+ rxname, txname);
+
+ /*
+ * Limit the maximum segment size in any SG entry according to
+ * the parameters of the DMA engine device.
+ */
+ if (host->dma_tx_channel) {
+ struct device *dev = host->dma_tx_channel->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+ if (host->dma_rx_channel) {
+ struct device *dev = host->dma_rx_channel->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+}
+
+/*
+ * This is used in __devinit or __devexit so inline it
+ * so it can be discarded.
+ */
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+ struct mmci_platform_data *plat = host->plat;
+
+ if (host->dma_rx_channel)
+ dma_release_channel(host->dma_rx_channel);
+ if (host->dma_tx_channel && plat->dma_tx_param)
+ dma_release_channel(host->dma_tx_channel);
+ host->dma_rx_channel = host->dma_tx_channel = NULL;
+}
+
+static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+ struct dma_chan *chan = host->dma_current;
+ enum dma_data_direction dir;
+ u32 status;
+ int i;
+
+ /* Wait up to 1ms for the DMA to complete */
+ for (i = 0; ; i++) {
+ status = readl(host->base + MMCISTATUS);
+ if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
+ break;
+ udelay(10);
+ }
+
+ /*
+ * Check to see whether we still have some data left in the FIFO -
+ * this catches DMA controllers which are unable to monitor the
+ * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
+ * contiguous buffers. On TX, we'll get a FIFO underrun error.
+ */
+ if (status & MCI_RXDATAAVLBLMASK) {
+ dmaengine_terminate_all(chan);
+ if (!data->error)
+ data->error = -EIO;
+ }
+
+ if (data->flags & MMC_DATA_WRITE) {
+ dir = DMA_TO_DEVICE;
+ } else {
+ dir = DMA_FROM_DEVICE;
+ }
+
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+
+ /*
+ * Use of DMA with scatter-gather is impossible.
+ * Give up on DMA and switch back to PIO mode.
+ */
+ if (status & MCI_RXDATAAVLBLMASK) {
+ dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
+ mmci_dma_release(host);
+ }
+}
+
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+ dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+ dmaengine_terminate_all(host->dma_current);
+}
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+ struct variant_data *variant = host->variant;
+ struct dma_slave_config conf = {
+ .src_addr = host->phybase + MMCIFIFO,
+ .dst_addr = host->phybase + MMCIFIFO,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
+ .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
+ };
+ struct mmc_data *data = host->data;
+ struct dma_chan *chan;
+ struct dma_device *device;
+ struct dma_async_tx_descriptor *desc;
+ int nr_sg;
+
+ host->dma_current = NULL;
+
+ if (data->flags & MMC_DATA_READ) {
+ conf.direction = DMA_FROM_DEVICE;
+ chan = host->dma_rx_channel;
+ } else {
+ conf.direction = DMA_TO_DEVICE;
+ chan = host->dma_tx_channel;
+ }
+
+ /* If there's no DMA channel, fall back to PIO */
+ if (!chan)
+ return -EINVAL;
+
+ /* If less than or equal to the fifo size, don't bother with DMA */
+ if (host->size <= variant->fifosize)
+ return -EINVAL;
+
+ device = chan->device;
+ nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
+ if (nr_sg == 0)
+ return -EINVAL;
+
+ dmaengine_slave_config(chan, &conf);
+ desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
+ conf.direction, DMA_CTRL_ACK);
+ if (!desc)
+ goto unmap_exit;
+
+ /* Okay, go for it. */
+ host->dma_current = chan;
+
+ dev_vdbg(mmc_dev(host->mmc),
+ "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
+ data->sg_len, data->blksz, data->blocks, data->flags);
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
+
+ datactrl |= MCI_DPSM_DMAENABLE;
+
+ /* Trigger the DMA transfer */
+ writel(datactrl, host->base + MMCIDATACTRL);
+
+ /*
+ * Let the MMCI say when the data is ended and it's time
+ * to fire the next DMA request. When that happens, MMCI will
+ * call mmci_data_end()
+ */
+ writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
+ host->base + MMCIMASK0);
+ return 0;
+
+unmap_exit:
+ dmaengine_terminate_all(chan);
+ dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+ return -ENOMEM;
+}
+#else
+/* Blank functions if the DMA engine is not available */
+static inline void mmci_dma_setup(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+}
+
+static inline void mmci_dma_data_error(struct mmci_host *host)
+{
+}
+
+static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+ return -ENOSYS;
+}
+#endif
+
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
struct variant_data *variant = host->variant;
@@ -202,9 +443,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
host->data = data;
host->size = data->blksz * data->blocks;
- host->data_xfered = 0;
-
- mmci_init_sg(host, data);
+ data->bytes_xfered = 0;
clks = (unsigned long long)data->timeout_ns * host->cclk;
do_div(clks, 1000000000UL);
@@ -219,15 +458,29 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
BUG_ON(1 << blksz_bits != data->blksz);
datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
- if (data->flags & MMC_DATA_READ) {
+
+ if (data->flags & MMC_DATA_READ)
datactrl |= MCI_DPSM_DIRECTION;
+
+ /*
+ * Attempt to use DMA operation mode; if this
+ * fails, fall back to PIO mode
+ */
+ if (!mmci_dma_start_data(host, datactrl))
+ return;
+
+ /* IRQ mode, map the SG list for CPU reading/writing */
+ mmci_init_sg(host, data);
+
+ if (data->flags & MMC_DATA_READ) {
irqmask = MCI_RXFIFOHALFFULLMASK;
/*
- * If we have less than a FIFOSIZE of bytes to transfer,
- * trigger a PIO interrupt as soon as any data is available.
+ * If we have less than the fifo 'half-full' threshold to
+ * transfer, trigger a PIO interrupt as soon as any data
+ * is available.
*/
- if (host->size < variant->fifosize)
+ if (host->size < variant->fifohalfsize)
irqmask |= MCI_RXDATAAVLBLMASK;
} else {
/*
@@ -283,49 +536,51 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
u32 remain, success;
- /* Calculate how far we are into the transfer */
+ /* Terminate the DMA transfer */
+ if (dma_inprogress(host))
+ mmci_dma_data_error(host);
+
+ /*
+ * Calculate how far we are into the transfer. Note that
+ * the data counter gives the number of bytes transferred
+ * on the MMC bus, not on the host side. On reads, this
+ * can be as much as a FIFO-worth of data ahead. This
+ * matters for FIFO overruns only.
+ */
remain = readl(host->base + MMCIDATACNT);
success = data->blksz * data->blocks - remain;
- dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
+ dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
+ status, success);
if (status & MCI_DATACRCFAIL) {
/* Last block was not successful */
- host->data_xfered = round_down(success - 1, data->blksz);
+ success -= 1;
data->error = -EILSEQ;
} else if (status & MCI_DATATIMEOUT) {
- host->data_xfered = round_down(success, data->blksz);
data->error = -ETIMEDOUT;
- } else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
- host->data_xfered = round_down(success, data->blksz);
+ } else if (status & MCI_TXUNDERRUN) {
+ data->error = -EIO;
+ } else if (status & MCI_RXOVERRUN) {
+ if (success > host->variant->fifosize)
+ success -= host->variant->fifosize;
+ else
+ success = 0;
data->error = -EIO;
}
-
- /*
- * We hit an error condition. Ensure that any data
- * partially written to a page is properly coherent.
- */
- if (data->flags & MMC_DATA_READ) {
- struct sg_mapping_iter *sg_miter = &host->sg_miter;
- unsigned long flags;
-
- local_irq_save(flags);
- if (sg_miter_next(sg_miter)) {
- flush_dcache_page(sg_miter->page);
- sg_miter_stop(sg_miter);
- }
- local_irq_restore(flags);
- }
+ data->bytes_xfered = round_down(success, data->blksz);
}
if (status & MCI_DATABLOCKEND)
dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
if (status & MCI_DATAEND || data->error) {
+ if (dma_inprogress(host))
+ mmci_dma_unmap(host, data);
mmci_stop_data(host);
if (!data->error)
/* The error clause is handled above, success! */
- host->data_xfered += data->blksz * data->blocks;
+ data->bytes_xfered = data->blksz * data->blocks;
if (!data->stop) {
mmci_request_end(host, data->mrq);
@@ -498,9 +753,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
if (remain)
break;
- if (status & MCI_RXACTIVE)
- flush_dcache_page(sg_miter->page);
-
status = readl(base + MMCISTATUS);
} while (1);
@@ -509,10 +761,10 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
local_irq_restore(flags);
/*
- * If we're nearing the end of the read, switch to
- * "any data available" mode.
+ * If we have less than the fifo 'half-full' threshold to transfer,
+ * trigger a PIO interrupt as soon as any data is available.
*/
- if (status & MCI_RXACTIVE && host->size < variant->fifosize)
+ if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
/*
@@ -777,6 +1029,7 @@ static int __devinit mmci_probe(struct amba_device *dev,
dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
host->mclk);
}
+ host->phybase = dev->res.start;
host->base = ioremap(dev->res.start, resource_size(&dev->res));
if (!host->base) {
ret = -ENOMEM;
@@ -904,9 +1157,12 @@ static int __devinit mmci_probe(struct amba_device *dev,
amba_set_drvdata(dev, mmc);
- dev_info(&dev->dev, "%s: PL%03x rev%u at 0x%08llx irq %d,%d\n",
- mmc_hostname(mmc), amba_part(dev), amba_rev(dev),
- (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
+ dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
+ mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
+ amba_rev(dev), (unsigned long long)dev->res.start,
+ dev->irq[0], dev->irq[1]);
+
+ mmci_dma_setup(host);
mmc_add_host(mmc);
@@ -953,6 +1209,7 @@ static int __devexit mmci_remove(struct amba_device *dev)
writel(0, host->base + MMCICOMMAND);
writel(0, host->base + MMCIDATACTRL);
+ mmci_dma_release(host);
free_irq(dev->irq[0], host);
if (!host->singleirq)
free_irq(dev->irq[1], host);
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index c1df7b8..ec9a7bc6 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -148,8 +148,10 @@
struct clk;
struct variant_data;
+struct dma_chan;
struct mmci_host {
+ phys_addr_t phybase;
void __iomem *base;
struct mmc_request *mrq;
struct mmc_command *cmd;
@@ -161,8 +163,6 @@ struct mmci_host {
int gpio_cd_irq;
bool singleirq;
- unsigned int data_xfered;
-
spinlock_t lock;
unsigned int mclk;
@@ -181,5 +181,16 @@ struct mmci_host {
struct sg_mapping_iter sg_miter;
unsigned int size;
struct regulator *vcc;
+
+#ifdef CONFIG_DMA_ENGINE
+ /* DMA stuff */
+ struct dma_chan *dma_current;
+ struct dma_chan *dma_rx_channel;
+ struct dma_chan *dma_tx_channel;
+
+#define dma_inprogress(host) ((host)->dma_current)
+#else
+#define dma_inprogress(host) (0)
+#endif
};
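For context on how the new dma_filter/dma_rx_param hooks are consumed by mmci_dma_setup() above, a hypothetical board-file sketch follows. The cookie names, the filter function and the chan->private comparison are illustrative assumptions only (real platforms match against their DMA controller's own data), and the sketch assumes the companion <linux/amba/mmci.h> change that adds these fields to struct mmci_platform_data.

#include <linux/dmaengine.h>
#include <linux/amba/mmci.h>

/* Hypothetical request cookies a platform might hand to the MMCI driver. */
static struct board_dma_request { int id; } board_rx_req = { 0 }, board_tx_req = { 1 };

/*
 * Filter callback for dma_request_channel(): return true when the candidate
 * channel matches the cookie passed via dma_rx_param/dma_tx_param.
 */
static bool board_mmci_dma_filter(struct dma_chan *chan, void *param)
{
	return chan->private == param;	/* illustrative match only */
}

static struct mmci_platform_data board_mmci_plat = {
	.dma_filter   = board_mmci_dma_filter,
	.dma_rx_param = &board_rx_req,
	.dma_tx_param = &board_tx_req,	/* omit to reuse the RX channel */
};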
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index d9d7efb..6322d1f 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -930,7 +930,7 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
init_completion(&dev->dma_done);
- dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
+ dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
if (!dev->card_workqueue)
goto error9;
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 67822cf..ac0d6a8 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1258,7 +1258,7 @@ static struct mtd_blktrans_ops sm_ftl_ops = {
static __init int sm_module_init(void)
{
int error = 0;
- cache_flush_workqueue = create_freezeable_workqueue("smflush");
+ cache_flush_workqueue = create_freezable_workqueue("smflush");
if (IS_ERR(cache_flush_workqueue))
return PTR_ERR(cache_flush_workqueue);
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 653c624..7897d11 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,7 +22,7 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.62.00-5"
+#define DRV_MODULE_VERSION "1.62.00-6"
#define DRV_MODULE_RELDATE "2011/01/30"
#define BNX2X_BC_VER 0x040200
@@ -1613,19 +1613,23 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_BTR 4
#define MAX_SPQ_PENDING 8
-
-/* CMNG constants
- derived from lab experiments, and not from system spec calculations !!! */
-#define DEF_MIN_RATE 100
+/* CMNG constants, as derived from system spec calculations */
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE 100
/* resolution of the rate shaping timer - 100 usec */
-#define RS_PERIODIC_TIMEOUT_USEC 100
-/* resolution of fairness algorithm in usecs -
- coefficient for calculating the actual t fair */
-#define T_FAIR_COEF 10000000
+#define RS_PERIODIC_TIMEOUT_USEC 100
/* number of bytes in single QM arbitration cycle -
- coefficient for calculating the fairness timer */
-#define QM_ARB_BYTES 40000
-#define FAIR_MEM 2
+ * coefficient for calculating the fairness timer */
+#define QM_ARB_BYTES 160000
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES 100
+/* how many bytes above threshold for the minimal credit of the Min algorithm */
+#define MIN_ABOVE_THRESH 32768
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair */
+#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
+/* Memory of the fairness algorithm - 2 cycles */
+#define FAIR_MEM 2
#define ATTN_NIG_FOR_FUNC (1L << 8)
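As a quick sanity check on the new fairness coefficient, the define above expands as follows (a standalone user-space sketch, not driver code):

#include <stdio.h>

/* Mirror of the new CMNG constants, for arithmetic only. */
#define QM_ARB_BYTES		160000
#define MIN_RES			100
#define MIN_ABOVE_THRESH	32768
#define T_FAIR_COEF	((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)

int main(void)
{
	/* (32768 + 160000) * 8 * 100 = 154214400 */
	printf("T_FAIR_COEF = %d\n", T_FAIR_COEF);
	return 0;
}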
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 710ce5d..9379812 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -259,10 +259,44 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
#endif
}
+/* Timestamp option length allowed for TPA aggregation:
+ *
+ * nop nop kind length echo val
+ */
+#define TPA_TSTAMP_OPT_LEN 12
+/**
+ * Calculate the approximate value of the MSS for this
+ * aggregation using its first packet.
+ *
+ * @param bp
+ * @param parsing_flags Parsing flags from the START CQE
+ * @param len_on_bd Total length of the first packet for the
+ * aggregation.
+ */
+static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
+ u16 len_on_bd)
+{
+ /* TPA aggregation won't have IP options or TCP options
+ * other than the timestamp.
+ */
+ u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
+
+
+ /* Check if there was a TCP timestamp; if there is, it will
+ * always be 12 bytes long: nop nop kind length echo val.
+ *
+ * Otherwise FW would close the aggregation.
+ */
+ if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
+ hdrs_len += TPA_TSTAMP_OPT_LEN;
+
+ return len_on_bd - hdrs_len;
+}
+
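As a worked example of the MSS approximation above (a standalone restatement, not driver code): for a 1514-byte first frame carrying a TCP timestamp option, the header overhead is 14 + 20 + 20 + 12 = 66 bytes, giving an MSS of 1448.

#include <stdio.h>
#include <stdint.h>

#define ETH_HLEN		14
#define IP_HDR_LEN		20
#define TCP_HDR_LEN		20
#define TPA_TSTAMP_OPT_LEN	12

/* Standalone restatement of bnx2x_set_lro_mss() for illustration. */
static uint16_t lro_mss(uint16_t len_on_bd, int has_tstamp)
{
	uint16_t hdrs_len = ETH_HLEN + IP_HDR_LEN + TCP_HDR_LEN;

	if (has_tstamp)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}

int main(void)
{
	printf("%u\n", lro_mss(1514, 1));	/* prints 1448 */
	return 0;
}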
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct sk_buff *skb,
struct eth_fast_path_rx_cqe *fp_cqe,
- u16 cqe_idx)
+ u16 cqe_idx, u16 parsing_flags)
{
struct sw_rx_page *rx_pg, old_rx_pg;
u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
@@ -275,8 +309,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* This is needed in order to enable forwarding support */
if (frag_size)
- skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
- max(frag_size, (u32)len_on_bd));
+ skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
+ len_on_bd);
#ifdef BNX2X_STOP_ON_ERROR
if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
@@ -344,6 +378,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (likely(new_skb)) {
/* fix ip xsum and give it to the stack */
/* (no need to map the new skb) */
+ u16 parsing_flags =
+ le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
prefetch(skb);
prefetch(((char *)(skb)) + L1_CACHE_BYTES);
@@ -373,9 +409,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
if (!bnx2x_fill_frag_skb(bp, fp, skb,
- &cqe->fast_path_cqe, cqe_idx)) {
- if ((le16_to_cpu(cqe->fast_path_cqe.
- pars_flags.flags) & PARSING_FLAGS_VLAN))
+ &cqe->fast_path_cqe, cqe_idx,
+ parsing_flags)) {
+ if (parsing_flags & PARSING_FLAGS_VLAN)
__vlan_hwaccel_put_tag(skb,
le16_to_cpu(cqe->fast_path_cqe.
vlan_tag));
@@ -703,19 +739,20 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
u16 line_speed = bp->link_vars.line_speed;
if (IS_MF(bp)) {
- u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
- FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT;
- /* Calculate the current MAX line speed limit for the DCC
- * capable devices
+ u16 maxCfg = bnx2x_extract_max_cfg(bp,
+ bp->mf_config[BP_VN(bp)]);
+
+ /* Calculate the current MAX line speed limit for the MF
+ * devices
*/
- if (IS_MF_SD(bp)) {
+ if (IS_MF_SI(bp))
+ line_speed = (line_speed * maxCfg) / 100;
+ else { /* SD mode */
u16 vn_max_rate = maxCfg * 100;
if (vn_max_rate < line_speed)
line_speed = vn_max_rate;
- } else /* IS_MF_SI(bp)) */
- line_speed = (line_speed * maxCfg) / 100;
+ }
}
return line_speed;
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 03eb4d6..326ba44 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -1044,4 +1044,24 @@ static inline void storm_memset_cmng(struct bnx2x *bp,
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);
+/**
+ * Extracts MAX BW part from MF configuration.
+ *
+ * @param bp
+ * @param mf_cfg
+ *
+ * @return u16
+ */
+static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
+{
+ u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ if (!max_cfg) {
+ BNX2X_ERR("Illegal configuration detected for Max BW - "
+ "using 100 instead\n");
+ max_cfg = 100;
+ }
+ return max_cfg;
+}
+
#endif /* BNX2X_CMN_H */
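To illustrate how bnx2x_extract_max_cfg() feeds the SI/SD branches added in bnx2x_get_mf_speed() above, here is a standalone sketch with made-up numbers: in SI mode max_cfg is a percentage of the link speed, while in SD mode it is an absolute rate in 100 Mbps units used as a cap.

#include <stdio.h>

/* Illustration only; mirrors the SI/SD handling, values are invented. */
static unsigned int mf_speed(unsigned int line_speed, unsigned int max_cfg,
			     int is_si)
{
	unsigned int vn_max_rate;

	if (is_si)
		return (line_speed * max_cfg) / 100;	/* percentage of link */

	/* SD mode: max_cfg is in 100 Mbps units and acts as a cap */
	vn_max_rate = max_cfg * 100;
	return vn_max_rate < line_speed ? vn_max_rate : line_speed;
}

int main(void)
{
	printf("SI: %u Mbps\n", mf_speed(10000, 30, 1));	/* 3000 */
	printf("SD: %u Mbps\n", mf_speed(10000, 30, 0));	/* 3000 */
	return 0;
}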
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 5b44a8b..ef29199 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -238,7 +238,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
speed |= (cmd->speed_hi << 16);
if (IS_MF_SI(bp)) {
- u32 param = 0;
+ u32 param = 0, part;
u32 line_speed = bp->link_vars.line_speed;
/* use 10G if no link detected */
@@ -251,9 +251,11 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
REQ_BC_VER_4_SET_MF_BW);
return -EINVAL;
}
- if (line_speed < speed) {
- BNX2X_DEV_INFO("New speed should be less or equal "
- "to actual line speed\n");
+ part = (speed * 100) / line_speed;
+ if (line_speed < speed || !part) {
+ BNX2X_DEV_INFO("Speed setting should be in a range "
+ "from 1%% to 100%% "
+ "of actual line speed\n");
return -EINVAL;
}
/* load old values */
@@ -263,8 +265,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
param &= FUNC_MF_CFG_MIN_BW_MASK;
/* set new MAX value */
- param |= (((speed * 100) / line_speed)
- << FUNC_MF_CFG_MAX_BW_SHIFT)
+ param |= (part << FUNC_MF_CFG_MAX_BW_SHIFT)
& FUNC_MF_CFG_MAX_BW_MASK;
bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param);
@@ -1781,9 +1782,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
{ 0x100, 0x350 }, /* manuf_info */
{ 0x450, 0xf0 }, /* feature_info */
{ 0x640, 0x64 }, /* upgrade_key_info */
- { 0x6a4, 0x64 },
{ 0x708, 0x70 }, /* manuf_key_info */
- { 0x778, 0x70 },
{ 0, 0 }
};
__be32 buf[0x350 / 4];
@@ -1933,11 +1932,11 @@ static void bnx2x_self_test(struct net_device *dev,
buf[4] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
- if (bp->port.pmf)
- if (bnx2x_link_test(bp, is_serdes) != 0) {
- buf[5] = 1;
- etest->flags |= ETH_TEST_FL_FAILED;
- }
+
+ if (bnx2x_link_test(bp, is_serdes) != 0) {
+ buf[5] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
#ifdef BNX2X_EXTRA_DEBUG
bnx2x_panic_dump(bp);
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index 5a268e9..fa6dbe3 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -241,7 +241,7 @@ static const struct {
/* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
* want to handle "system kill" flow at the moment.
*/
- BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff),
+ BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff),
BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff),
BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index d584d32..032ae18 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -1974,13 +1974,22 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
vn_max_rate = 0;
} else {
+ u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
+
vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
- /* If min rate is zero - set it to 1 */
+ /* If fairness is enabled (not all min rates are zero) and
+ the current min rate is zero, set it to 1.
+ This is a requirement of the algorithm. */
if (bp->vn_weight_sum && (vn_min_rate == 0))
vn_min_rate = DEF_MIN_RATE;
- vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
+
+ if (IS_MF_SI(bp))
+ /* maxCfg in percents of linkspeed */
+ vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
+ else
+ /* maxCfg is absolute in 100Mb units */
+ vn_max_rate = maxCfg * 100;
}
DP(NETIF_MSG_IFUP,
@@ -2006,7 +2015,8 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
m_fair_vn.vn_credit_delta =
max_t(u32, (vn_min_rate * (T_FAIR_COEF /
(8 * bp->vn_weight_sum))),
- (bp->cmng.fair_vars.fair_threshold * 2));
+ (bp->cmng.fair_vars.fair_threshold +
+ MIN_ABOVE_THRESH));
DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
m_fair_vn.vn_credit_delta);
}
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index bda60d5..3445ded 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -1239,14 +1239,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
if (unlikely(bp->panic))
return;
+ bnx2x_stats_stm[bp->stats_state][event].action(bp);
+
/* Protect a state change flow */
spin_lock_bh(&bp->stats_lock);
state = bp->stats_state;
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
spin_unlock_bh(&bp->stats_lock);
- bnx2x_stats_stm[state][event].action(bp);
-
if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
state, event, bp->stats_state);
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 7ab534a..7513c45 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net)
goto open_unlock;
}
- priv->wq = create_freezeable_workqueue("mcp251x_wq");
+ priv->wq = create_freezable_workqueue("mcp251x_wq");
INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
index 8ba81b3..5de46a9 100644
--- a/drivers/net/can/softing/Kconfig
+++ b/drivers/net/can/softing/Kconfig
@@ -18,7 +18,7 @@ config CAN_SOFTING
config CAN_SOFTING_CS
tristate "Softing Gmbh CAN pcmcia cards"
depends on PCMCIA
- select CAN_SOFTING
+ depends on CAN_SOFTING
---help---
Support for PCMCIA cards from Softing Gmbh & some cards
from Vector Gmbh.
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 5157e15..aeea9f9 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -633,6 +633,7 @@ static const struct net_device_ops softing_netdev_ops = {
};
static const struct can_bittiming_const softing_btr_const = {
+ .name = "softing",
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 7ff170c..302be4a 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -2760,6 +2760,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
int kcqe_cnt;
+ /* status block index must be read before reading other fields */
+ rmb();
cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
@@ -2770,6 +2772,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
barrier();
if (status_idx != *cp->kcq1.status_idx_ptr) {
status_idx = (u16) *cp->kcq1.status_idx_ptr;
+ /* status block index must be read first */
+ rmb();
cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
} else
break;
@@ -2888,6 +2892,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
u32 last_status = *info->status_idx_ptr;
int kcqe_cnt;
+ /* status block index must be read before reading the KCQ */
+ rmb();
while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
service_kcqes(dev, kcqe_cnt);
@@ -2898,6 +2904,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
break;
last_status = *info->status_idx_ptr;
+ /* status block index must be read before reading the KCQ */
+ rmb();
}
return last_status;
}
@@ -2906,26 +2914,35 @@ static void cnic_service_bnx2x_bh(unsigned long data)
{
struct cnic_dev *dev = (struct cnic_dev *) data;
struct cnic_local *cp = dev->cnic_priv;
- u32 status_idx;
+ u32 status_idx, new_status_idx;
if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
return;
- status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
+ while (1) {
+ status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
- CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
+ CNIC_WR16(dev, cp->kcq1.io_addr,
+ cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
- if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
- status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+ if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
+ cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
+ status_idx, IGU_INT_ENABLE, 1);
+ break;
+ }
+
+ new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+
+ if (new_status_idx != status_idx)
+ continue;
CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
MAX_KCQ_IDX);
cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
status_idx, IGU_INT_ENABLE, 1);
- } else {
- cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
- status_idx, IGU_INT_ENABLE, 1);
+
+ break;
}
}
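The E2 path above now loops: service kcq1, service kcq2, and only write the ack once the status index has stopped moving between the two passes. A standalone sketch of that re-check pattern (mock data, not driver code) follows.

#include <stdio.h>

static int hw_index;

/* Mock: pretend new completions arrive during the first two passes. */
static int service_kcq(void)
{
	if (hw_index < 2)
		hw_index++;
	return hw_index;
}

int main(void)
{
	int status_idx, new_status_idx;

	while (1) {
		status_idx = service_kcq();		/* kcq1 pass */
		new_status_idx = service_kcq();		/* kcq2 pass */

		if (new_status_idx != status_idx)
			continue;	/* index moved, more work pending */

		printf("ack interrupt at index %d\n", status_idx);
		break;
	}
	return 0;
}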
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 56166ae..6aad64d 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -2040,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
{
int i;
- BUG_ON(adapter->debugfs_root == NULL);
+ BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
/*
* Debugfs support is best effort.
@@ -2061,7 +2061,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
*/
static void cleanup_debugfs(struct adapter *adapter)
{
- BUG_ON(adapter->debugfs_root == NULL);
+ BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
/*
* Unlike our sister routine cleanup_proc(), we don't need to remove
@@ -2489,17 +2489,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
struct net_device *netdev;
/*
- * Vet our module parameters.
- */
- if (msi != MSI_MSIX && msi != MSI_MSI) {
- dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
- " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
- MSI_MSI);
- err = -EINVAL;
- goto err_out;
- }
-
- /*
* Print our driver banner the first time we're called to initialize a
* device.
*/
@@ -2711,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
/*
* Set up our debugfs entries.
*/
- if (cxgb4vf_debugfs_root) {
+ if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
adapter->debugfs_root =
debugfs_create_dir(pci_name(pdev),
cxgb4vf_debugfs_root);
- if (adapter->debugfs_root == NULL)
+ if (IS_ERR_OR_NULL(adapter->debugfs_root))
dev_warn(&pdev->dev, "could not create debugfs"
" directory");
else
@@ -2770,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
*/
err_free_debugfs:
- if (adapter->debugfs_root) {
+ if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
cleanup_debugfs(adapter);
debugfs_remove_recursive(adapter->debugfs_root);
}
@@ -2802,7 +2791,6 @@ err_release_regions:
err_disable_device:
pci_disable_device(pdev);
-err_out:
return err;
}
@@ -2840,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
/*
* Tear down our debugfs entries.
*/
- if (adapter->debugfs_root) {
+ if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
cleanup_debugfs(adapter);
debugfs_remove_recursive(adapter->debugfs_root);
}
@@ -2874,6 +2862,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
}
/*
+ * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
+ * delivery.
+ */
+static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
+{
+ struct adapter *adapter;
+ int pidx;
+
+ adapter = pci_get_drvdata(pdev);
+ if (!adapter)
+ return;
+
+ /*
+ * Disable all Virtual Interfaces. This will shut down the
+ * delivery of all ingress packets into the chip for these
+ * Virtual Interfaces.
+ */
+ for_each_port(adapter, pidx) {
+ struct net_device *netdev;
+ struct port_info *pi;
+
+ if (!test_bit(pidx, &adapter->registered_device_map))
+ continue;
+
+ netdev = adapter->port[pidx];
+ if (!netdev)
+ continue;
+
+ pi = netdev_priv(netdev);
+ t4vf_enable_vi(adapter, pi->viid, false, false);
+ }
+
+ /*
+ * Free up all Queues, which will prevent further DMA and
+ * Interrupts, allowing various internal pathways to drain.
+ */
+ t4vf_free_sge_resources(adapter);
+}
+
+/*
* PCI Device registration data structures.
*/
#define CH_DEVICE(devid, idx) \
@@ -2906,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = {
.id_table = cxgb4vf_pci_tbl,
.probe = cxgb4vf_pci_probe,
.remove = __devexit_p(cxgb4vf_pci_remove),
+ .shutdown = __devexit_p(cxgb4vf_pci_shutdown),
};
/*
@@ -2915,14 +2944,25 @@ static int __init cxgb4vf_module_init(void)
{
int ret;
+ /*
+ * Vet our module parameters.
+ */
+ if (msi != MSI_MSIX && msi != MSI_MSI) {
+ printk(KERN_WARNING KBUILD_MODNAME
+ ": bad module parameter msi=%d; must be %d"
+ " (MSI-X or MSI) or %d (MSI)\n",
+ msi, MSI_MSIX, MSI_MSI);
+ return -EINVAL;
+ }
+
/* Debugfs support is optional, just warn if this fails */
cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!cxgb4vf_debugfs_root)
+ if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
printk(KERN_WARNING KBUILD_MODNAME ": could not create"
" debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4vf_driver);
- if (ret < 0)
+ if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
debugfs_remove(cxgb4vf_debugfs_root);
return ret;
}
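The IS_ERR_OR_NULL() conversions above cover both failure modes of debugfs_create_dir(): it can return NULL on allocation failure, or an ERR_PTR when debugfs is not compiled in, so a plain NULL test is not enough. A minimal sketch of the pattern (kernel context assumed, helper name hypothetical):

	struct dentry *root = debugfs_create_dir("example", NULL);

	if (IS_ERR_OR_NULL(root))
		pr_warn("example: debugfs unavailable, continuing\n");
	else
		setup_example_debugfs(root);	/* hypothetical helper */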
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index 0f51c80..192db22 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -171,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
delay_idx = 0;
ms = delay[0];
- for (i = 0; i < 500; i += ms) {
+ for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
if (sleep_ok) {
ms = delay[delay_idx];
if (delay_idx < ARRAY_SIZE(delay) - 1)
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 2a628d1..7018bfe 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status)
int ret;
/* free and bail if we are shutting down */
- if (unlikely(!netif_running(ndev))) {
+ if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
dev_kfree_skb_any(skb);
return;
}
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 2d4c4fc..461dd6f 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -802,10 +802,7 @@ dm9000_init_dm9000(struct net_device *dev)
/* Checksum mode */
dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
- /* GPIO0 on pre-activate PHY */
- iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
- iow(db, DM9000_GPR, 0); /* Enable PHY */
ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
@@ -852,8 +849,8 @@ static void dm9000_timeout(struct net_device *dev)
unsigned long flags;
/* Save previous register address */
- reg_save = readb(db->io_addr);
spin_lock_irqsave(&db->lock, flags);
+ reg_save = readb(db->io_addr);
netif_stop_queue(dev);
dm9000_reset(db);
@@ -1194,6 +1191,10 @@ dm9000_open(struct net_device *dev)
if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
return -EAGAIN;
+ /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
+ iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
+ mdelay(1); /* delay needed by DM9000B */
+
/* Initialize DM9000 board */
dm9000_reset(db);
dm9000_init_dm9000(dev);
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 9d8a20b..8318ea0 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -337,8 +337,6 @@ static int dnet_mii_init(struct dnet *bp)
for (i = 0; i < PHY_MAX_ADDR; i++)
bp->mii_bus->irq[i] = PHY_POLL;
- platform_set_drvdata(bp->dev, bp->mii_bus);
-
if (mdiobus_register(bp->mii_bus)) {
err = -ENXIO;
goto err_out_free_mdio_irq;
@@ -863,6 +861,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
bp = netdev_priv(dev);
bp->dev = dev;
+ platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
spin_lock_init(&bp->lock);
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 55c1711..33e7c45a 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -42,7 +42,8 @@
#define GBE_CONFIG_RAM_BASE \
((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
-#define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE)
+#define GBE_CONFIG_BASE_VIRT \
+ ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE))
#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
(iowrite16_rep(base + offset, data, count))
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 3065870..2e50228 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -937,6 +937,9 @@ static void e1000_print_hw_hang(struct work_struct *work)
u16 phy_status, phy_1000t_status, phy_ext_status;
u16 pci_status;
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
e1e_rphy(hw, PHY_STATUS, &phy_status);
e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1506,6 +1509,9 @@ static void e1000e_downshift_workaround(struct work_struct *work)
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter, downshift_task);
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}
@@ -3338,6 +3344,21 @@ int e1000e_up(struct e1000_adapter *adapter)
return 0;
}
+static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (!(adapter->flags2 & FLAG2_DMA_BURST))
+ return;
+
+ /* flush pending descriptor writebacks to memory */
+ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+ ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+ /* execute the writes immediately */
+ e1e_flush();
+}
+
void e1000e_down(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -3377,6 +3398,9 @@ void e1000e_down(struct e1000_adapter *adapter)
if (!pci_channel_offline(adapter->pdev))
e1000e_reset(adapter);
+
+ e1000e_flush_descriptors(adapter);
+
e1000_clean_tx_ring(adapter);
e1000_clean_rx_ring(adapter);
@@ -3765,6 +3789,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter, update_phy_task);
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
e1000_get_phy_info(&adapter->hw);
}
@@ -3775,6 +3803,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
static void e1000_update_phy_info(unsigned long data)
{
struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
schedule_work(&adapter->update_phy_task);
}
@@ -4149,6 +4181,9 @@ static void e1000_watchdog_task(struct work_struct *work)
u32 link, tctl;
int tx_pending = 0;
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
link = e1000e_has_link(adapter);
if ((netif_carrier_ok(netdev)) && link) {
/* Cancel scheduled suspend requests. */
@@ -4337,19 +4372,12 @@ link_up:
else
ew32(ICS, E1000_ICS_RXDMT0);
+ /* flush pending descriptors to memory before detecting Tx hang */
+ e1000e_flush_descriptors(adapter);
+
/* Force detection of hung controller every watchdog period */
adapter->detect_tx_hung = 1;
- /* flush partial descriptors to memory before detecting Tx hang */
- if (adapter->flags2 & FLAG2_DMA_BURST) {
- ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
- ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
- /*
- * no need to flush the writes because the timeout code does
- * an er32 first thing
- */
- }
-
/*
* With 82571 controllers, LAA may be overwritten due to controller
* reset from the other port. Set the appropriate LAA in RAR[0]
@@ -4887,6 +4915,10 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter;
adapter = container_of(work, struct e1000_adapter, reset_task);
+ /* don't run the task if already down */
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
(adapter->flags & FLAG_RX_RESTART_NOW))) {
e1000e_dump(adapter);
@@ -5935,7 +5967,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* APME bit in EEPROM is mapped to WUC.APME */
eeprom_data = er32(WUC);
eeprom_apme_mask = E1000_WUC_APME;
- if (eeprom_data & E1000_WUC_PHY_WAKE)
+ if ((hw->mac.type > e1000_ich10lan) &&
+ (eeprom_data & E1000_WUC_PHY_WAKE))
adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 2a71373..cd0282d 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -74,7 +74,8 @@ static struct platform_device_id fec_devtype[] = {
}, {
.name = "imx28-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
- }
+ },
+ { }
};
static unsigned char macaddr[ETH_ALEN];
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index af09296..9c0b1ba 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5645,6 +5645,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
goto out_error;
}
+ netif_carrier_off(dev);
+
dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index 74486a8..af3822f 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -220,7 +220,7 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
* The parameter rar_count will usually be hw->mac.rar_entry_count
* unless there are workarounds that change this.
**/
-void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
+static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count,
u32 rar_used_count, u32 rar_count)
{
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 8753980..c54a882 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -159,7 +159,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
struct scatterlist *sg;
unsigned int i, j, dmacount;
unsigned int len;
- static const unsigned int bufflen = 4096;
+ static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
unsigned int firstoff = 0;
unsigned int lastsize;
unsigned int thisoff = 0;
@@ -254,6 +254,24 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
/* only the last buffer may have non-full bufflen */
lastsize = thisoff + thislen;
+ /*
+ * lastsize cannot be the buffer length.
+ * If it is, add another buffer with lastsize = 1.
+ */
+ if (lastsize == bufflen) {
+ if (j >= IXGBE_BUFFCNT_MAX) {
+ e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
+ "not enough user buffers. We need an extra "
+ "buffer because lastsize is bufflen.\n",
+ xid, i, j, dmacount, (u64)addr);
+ goto out_noddp_free;
+ }
+
+ ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
+ j++;
+ lastsize = 1;
+ }
+
fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
@@ -532,6 +550,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
e_err(drv, "failed to allocated FCoE DDP pool\n");
spin_lock_init(&fcoe->lock);
+
+ /* Extra buffer to be shared by all DDPs for the HW workaround */
+ fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+ if (fcoe->extra_ddp_buffer == NULL) {
+ e_err(drv, "failed to allocated extra DDP buffer\n");
+ goto out_extra_ddp_buffer_alloc;
+ }
+
+ fcoe->extra_ddp_buffer_dma =
+ dma_map_single(&adapter->pdev->dev,
+ fcoe->extra_ddp_buffer,
+ IXGBE_FCBUFF_MIN,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ fcoe->extra_ddp_buffer_dma)) {
+ e_err(drv, "failed to map extra DDP buffer\n");
+ goto out_extra_ddp_buffer_dma;
+ }
}
/* Enable L2 eth type filter for FCoE */
@@ -581,6 +617,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
}
}
#endif
+
+ return;
+
+out_extra_ddp_buffer_dma:
+ kfree(fcoe->extra_ddp_buffer);
+out_extra_ddp_buffer_alloc:
+ pci_pool_destroy(fcoe->pool);
+ fcoe->pool = NULL;
}
/**
@@ -600,6 +644,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
if (fcoe->pool) {
for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
ixgbe_fcoe_ddp_put(adapter->netdev, i);
+ dma_unmap_single(&adapter->pdev->dev,
+ fcoe->extra_ddp_buffer_dma,
+ IXGBE_FCBUFF_MIN,
+ DMA_FROM_DEVICE);
+ kfree(fcoe->extra_ddp_buffer);
pci_pool_destroy(fcoe->pool);
fcoe->pool = NULL;
}
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index 4bc2c55..65cc8fb 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -70,6 +70,8 @@ struct ixgbe_fcoe {
spinlock_t lock;
struct pci_pool *pool;
struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+ unsigned char *extra_ddp_buffer;
+ dma_addr_t extra_ddp_buffer_dma;
};
#endif /* _IXGBE_FCOE_H */
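To make the workaround above concrete (a standalone sketch with invented values): when the final user buffer ends exactly on a bufflen boundary, lastsize would equal IXGBE_FCBUFF_MIN, which the driver now avoids by chaining the shared extra DDP buffer and programming lastsize = 1 instead.

#include <stdio.h>

#define IXGBE_FCBUFF_MIN	4096	/* bufflen used for each DDP buffer */

int main(void)
{
	unsigned int thisoff = 0, thislen = IXGBE_FCBUFF_MIN;
	unsigned int lastsize = thisoff + thislen;
	unsigned int extra_buffers = 0;

	/* Mirrors the new check: never program lastsize == bufflen. */
	if (lastsize == IXGBE_FCBUFF_MIN) {
		extra_buffers++;	/* shared extra_ddp_buffer is appended */
		lastsize = 1;
	}

	printf("lastsize=%u extra_buffers=%u\n", lastsize, extra_buffers);
	return 0;
}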
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index fbae703..30f9ccf 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3728,7 +3728,8 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
* We need to try and force an autonegotiation
* session, then bring up link.
*/
- hw->mac.ops.setup_sfp(hw);
+ if (hw->mac.ops.setup_sfp)
+ hw->mac.ops.setup_sfp(hw);
if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
schedule_work(&adapter->multispeed_fiber_task);
} else {
@@ -5968,7 +5969,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
unregister_netdev(adapter->netdev);
return;
}
- hw->mac.ops.setup_sfp(hw);
+ if (hw->mac.ops.setup_sfp)
+ hw->mac.ops.setup_sfp(hw);
if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
/* This will also work for DA Twinax connections */
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index f69e73e..79ccb54 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -260,7 +260,7 @@ static int macb_mii_init(struct macb *bp)
for (i = 0; i < PHY_MAX_ADDR; i++)
bp->mii_bus->irq[i] = PHY_POLL;
- platform_set_drvdata(bp->dev, bp->mii_bus);
+ dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
if (mdiobus_register(bp->mii_bus))
goto err_out_free_mdio_irq;
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index a0c26a9..e1e33c8 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -73,7 +73,7 @@ struct pch_gbe_regs {
struct pch_gbe_regs_mac_adr mac_adr[16];
u32 ADDR_MASK;
u32 MIIM;
- u32 reserve2;
+ u32 MAC_ADDR_LOAD;
u32 RGMII_ST;
u32 RGMII_CTRL;
u32 reserve3[3];
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 4c9a7d4..b99e90a 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -29,6 +29,7 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_SHORT_PKT 64
#define DSC_INIT16 0xC000
#define PCH_GBE_DMA_ALIGN 0
+#define PCH_GBE_DMA_PADDING 2
#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
#define PCH_GBE_COPYBREAK_DEFAULT 256
#define PCH_GBE_PCI_BAR 1
@@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
int data);
+
+inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
+{
+ iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
+}
+
/**
* pch_gbe_mac_read_mac_addr - Read MAC address
* @hw: Pointer to the HW structure
@@ -1365,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
struct pch_gbe_buffer *buffer_info;
struct pch_gbe_rx_desc *rx_desc;
u32 length;
- unsigned char tmp_packet[ETH_HLEN];
unsigned int i;
unsigned int cleaned_count = 0;
bool cleaned = false;
- struct sk_buff *skb;
+ struct sk_buff *skb, *new_skb;
u8 dma_status;
u16 gbec_status;
u32 tcp_ip_status;
- u8 skb_copy_flag = 0;
- u8 skb_padding_flag = 0;
i = rx_ring->next_to_clean;
@@ -1418,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
pr_err("Receive CRC Error\n");
} else {
/* get receive length */
- /* length convert[-3], padding[-2] */
- length = (rx_desc->rx_words_eob) - 3 - 2;
+ /* length convert[-3] */
+ length = (rx_desc->rx_words_eob) - 3;
/* Decide the data conversion method */
if (!adapter->rx_csum) {
/* [Header:14][payload] */
- skb_padding_flag = 0;
- skb_copy_flag = 1;
+ if (NET_IP_ALIGN) {
+ /* Because the alignment differs,
+ * allocate a new skb and
+ * copy the data into it. */
+ new_skb = netdev_alloc_skb(netdev,
+ length + NET_IP_ALIGN);
+ if (!new_skb) {
+ /* dorrop error */
+ pr_err("New skb allocation "
+ "Error\n");
+ goto dorrop;
+ }
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ memcpy(new_skb->data, skb->data,
+ length);
+ skb = new_skb;
+ } else {
+ /* DMA buffer is used as SKB as it is.*/
+ buffer_info->skb = NULL;
+ }
} else {
/* [Header:14][padding:2][payload] */
- skb_padding_flag = 1;
- if (length < copybreak)
- skb_copy_flag = 1;
- else
- skb_copy_flag = 0;
- }
-
- /* Data conversion */
- if (skb_copy_flag) { /* recycle skb */
- struct sk_buff *new_skb;
- new_skb =
- netdev_alloc_skb(netdev,
- length + NET_IP_ALIGN);
- if (new_skb) {
- if (!skb_padding_flag) {
- skb_reserve(new_skb,
- NET_IP_ALIGN);
+ /* The length includes padding length */
+ length = length - PCH_GBE_DMA_PADDING;
+ if ((length < copybreak) ||
+ (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
+ /* Because the alignment differs,
+ * allocate a new skb and
+ * copy the data into it.
+ * The padding is dropped
+ * as part of the copy. */
+ new_skb = netdev_alloc_skb(netdev,
+ length + NET_IP_ALIGN);
+ if (!new_skb) {
+ /* dorrop error */
+ pr_err("New skb allocation "
+ "Error\n");
+ goto dorrop;
}
+ skb_reserve(new_skb, NET_IP_ALIGN);
memcpy(new_skb->data, skb->data,
- length);
- /* save the skb
- * in buffer_info as good */
+ ETH_HLEN);
+ memcpy(&new_skb->data[ETH_HLEN],
+ &skb->data[ETH_HLEN +
+ PCH_GBE_DMA_PADDING],
+ length - ETH_HLEN);
skb = new_skb;
- } else if (!skb_padding_flag) {
- /* dorrop error */
- pr_err("New skb allocation Error\n");
- goto dorrop;
+ } else {
+ /* The padding is dropped
+ * by moving the header data. */
+ memmove(&skb->data[PCH_GBE_DMA_PADDING],
+ &skb->data[0], ETH_HLEN);
+ skb_reserve(skb, NET_IP_ALIGN);
+ buffer_info->skb = NULL;
}
- } else {
- buffer_info->skb = NULL;
}
- if (skb_padding_flag) {
- memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN);
- memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
- ETH_HLEN);
- skb_reserve(skb, NET_IP_ALIGN);
-
- }
-
+ /* The length includes FCS length */
+ length = length - ETH_FCS_LEN;
/* update status of driver */
adapter->stats.rx_bytes += length;
adapter->stats.rx_packets++;
@@ -2318,6 +2337,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
pch_gbe_set_ethtool_ops(netdev);
+ pch_gbe_mac_load_mac_addr(&adapter->hw);
pch_gbe_mac_reset_hw(&adapter->hw);
/* setup the private structure */
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 9226cda..530ab5a 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -691,6 +691,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
PCMCIA_DEVICE_NULL,
};
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 59ccf0c..7ffdb80 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -25,6 +25,7 @@
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/firmware.h>
+#include <linux/pci-aspm.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -617,8 +618,9 @@ static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
}
}
-static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd)
+static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
+ void __iomem *ioaddr = tp->mmio_addr;
int i;
RTL_W8(ERIDR, cmd);
@@ -630,7 +632,7 @@ static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd)
break;
}
- ocp_write(ioaddr, 0x1, 0x30, 0x00000001);
+ ocp_write(tp, 0x1, 0x30, 0x00000001);
}
#define OOB_CMD_RESET 0x00
@@ -2868,8 +2870,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- if (tp->mac_version == RTL_GIGA_MAC_VER_27)
+ if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
+ (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
return;
+ }
if (((tp->mac_version == RTL_GIGA_MAC_VER_23) ||
(tp->mac_version == RTL_GIGA_MAC_VER_24)) &&
@@ -2891,6 +2896,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25:
case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
break;
}
@@ -2900,12 +2907,17 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- if (tp->mac_version == RTL_GIGA_MAC_VER_27)
+ if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
+ (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
return;
+ }
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25:
case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
break;
}
@@ -3009,6 +3021,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
mii->reg_num_mask = 0x1f;
mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
+ /* Disable ASPM completely: it causes random devices to stop working
+ * as well as full system hangs for some PCIe device users. */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pci_enable_device(pdev);
if (rc < 0) {
@@ -3042,7 +3059,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_mwi_2;
}
- tp->cp_cmd = PCIMulRW | RxChkSum;
+ tp->cp_cmd = RxChkSum;
if ((sizeof(dma_addr_t) > 4) &&
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -3190,6 +3207,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_dev_run_wake(pdev))
pm_runtime_put_noidle(&pdev->dev);
+ netif_carrier_off(dev);
+
out:
return rc;
@@ -3316,7 +3335,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
/* Disable interrupts */
rtl8169_irq_mask_and_ack(ioaddr);
- if (tp->mac_version == RTL_GIGA_MAC_VER_28) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28) {
while (RTL_R8(TxPoll) & NPQ)
udelay(20);
@@ -3845,8 +3865,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
Cxpl_dbg_sel | \
ASF | \
PktCntrDisable | \
- PCIDAC | \
- PCIMulRW)
+ Mac_dbgo_sel)
static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
@@ -3876,8 +3895,6 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
RTL_W8(Config1, cfg1 & ~LEDS0);
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
-
rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
@@ -3889,8 +3906,6 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
-
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
}
static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
@@ -3916,6 +3931,8 @@ static void rtl_hw_start_8101(struct net_device *dev)
}
}
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_07:
rtl_hw_start_8102e_1(ioaddr, pdev);
@@ -3930,14 +3947,13 @@ static void rtl_hw_start_8101(struct net_device *dev)
break;
}
- RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
RTL_W8(MaxTxPacketSize, TxPacketMax);
rtl_set_rx_max_size(ioaddr, rx_buf_sz);
- tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
-
+ tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
RTL_W16(CPlusCmd, tp->cp_cmd);
RTL_W16(IntrMitigate, 0x0000);
@@ -3947,14 +3963,10 @@ static void rtl_hw_start_8101(struct net_device *dev)
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_set_rx_tx_config_registers(tp);
- RTL_W8(Cfg9346, Cfg9346_Lock);
-
RTL_R8(IntrMask);
rtl_set_rx_mode(dev);
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
-
RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
RTL_W16(IntrMask, tp->intr_event);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 0e8bb19..ca886d9 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -569,9 +569,14 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_self_tests efx_tests;
+ struct efx_self_tests *efx_tests;
int already_up;
- int rc;
+ int rc = -ENOMEM;
+
+ efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
+ if (!efx_tests)
+ goto fail;
+
ASSERT_RTNL();
if (efx->state != STATE_RUNNING) {
@@ -589,13 +594,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed opening device.\n");
- goto fail2;
+ goto fail1;
}
}
- memset(&efx_tests, 0, sizeof(efx_tests));
-
- rc = efx_selftest(efx, &efx_tests, test->flags);
+ rc = efx_selftest(efx, efx_tests, test->flags);
if (!already_up)
dev_close(efx->net_dev);
@@ -604,10 +607,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
rc == 0 ? "passed" : "failed",
(test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
- fail2:
- fail1:
+fail1:
/* Fill ethtool results structures */
- efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data);
+ efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
+ kfree(efx_tests);
+fail:
if (rc)
test->flags |= ETH_TEST_FL_FAILED;
}
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 42daf98..35b28f4 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3856,9 +3856,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
- /* device is off until link detection */
- netif_carrier_off(dev);
-
return dev;
}
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 34a0af3..0e5f031 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1560,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev)
priv->hw = device;
- if (device_can_wakeup(priv->device))
+ if (device_can_wakeup(priv->device)) {
priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+ enable_irq_wake(dev->irq);
+ }
return 0;
}
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 93b32d3..06c0e503 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -11158,7 +11158,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
break; /* We have no PHY */
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+ ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+ !netif_running(dev)))
return -EAGAIN;
spin_lock_bh(&tp->lock);
@@ -11174,7 +11176,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
break; /* We have no PHY */
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+ ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+ !netif_running(dev)))
return -EAGAIN;
spin_lock_bh(&tp->lock);
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 02b622e..5002f5b 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -651,6 +651,10 @@ static const struct usb_device_id products[] = {
.driver_info = (unsigned long)&dm9601_info,
},
{
+ USB_DEVICE(0x0fe6, 0x9700), /* DM9601 USB to Fast Ethernet Adapter */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
+ {
USB_DEVICE(0x0a46, 0x9000), /* DM9000E */
.driver_info = (unsigned long)&dm9601_info,
},
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index bed8fce..6d83812 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2628,15 +2628,15 @@ exit:
static void hso_free_tiomget(struct hso_serial *serial)
{
- struct hso_tiocmget *tiocmget = serial->tiocmget;
+ struct hso_tiocmget *tiocmget;
+ if (!serial)
+ return;
+ tiocmget = serial->tiocmget;
if (tiocmget) {
- if (tiocmget->urb) {
- usb_free_urb(tiocmget->urb);
- tiocmget->urb = NULL;
- }
+ usb_free_urb(tiocmget->urb);
+ tiocmget->urb = NULL;
serial->tiocmget = NULL;
kfree(tiocmget);
-
}
}
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ed9a416..95c41d5 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -931,8 +931,10 @@ fail_halt:
if (urb != NULL) {
clear_bit (EVENT_RX_MEMORY, &dev->flags);
status = usb_autopm_get_interface(dev->intf);
- if (status < 0)
+ if (status < 0) {
+ usb_free_urb(urb);
goto fail_lowmem;
+ }
if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
resched = 0;
usb_autopm_put_interface(dev->intf);
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 78c26fd..62ce2f4 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -282,6 +282,34 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah)
return 0;
}
+/*
+ * Wait for synth to settle
+ */
+static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ /*
+ * On 5211+ read activation -> rx delay
+ * and use it (100ns steps).
+ */
+ if (ah->ah_version != AR5K_AR5210) {
+ u32 delay;
+ delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
+ AR5K_PHY_RX_DELAY_M;
+ delay = (channel->hw_value & CHANNEL_CCK) ?
+ ((delay << 2) / 22) : (delay / 10);
+ if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
+ delay = delay << 1;
+ if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
+ delay = delay << 2;
+ /* XXX: /2 on turbo ? Let's be safe
+ * for now */
+ udelay(100 + delay);
+ } else {
+ mdelay(1);
+ }
+}
+
/**********************\
* RF Gain optimization *
@@ -1253,6 +1281,7 @@ static int ath5k_hw_channel(struct ath5k_hw *ah,
case AR5K_RF5111:
ret = ath5k_hw_rf5111_channel(ah, channel);
break;
+ case AR5K_RF2317:
case AR5K_RF2425:
ret = ath5k_hw_rf2425_channel(ah, channel);
break;
@@ -3237,6 +3266,13 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
/* Failed */
if (i >= 100)
return -EIO;
+
+ /* Set channel and wait for synth */
+ ret = ath5k_hw_channel(ah, channel);
+ if (ret)
+ return ret;
+
+ ath5k_hw_wait_for_synth(ah, channel);
}
/*
@@ -3251,13 +3287,53 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
if (ret)
return ret;
+ /* Write OFDM timings on 5212*/
+ if (ah->ah_version == AR5K_AR5212 &&
+ channel->hw_value & CHANNEL_OFDM) {
+
+ ret = ath5k_hw_write_ofdm_timings(ah, channel);
+ if (ret)
+ return ret;
+
+ /* Spur info is available only from EEPROM versions
+ * greater than 5.3, but the EEPROM routines will use
+ * static values for older versions */
+ if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
+ ath5k_hw_set_spur_mitigation_filter(ah,
+ channel);
+ }
+
+ /* If we used fast channel switching
+ * we are done, release RF bus and
+ * fire up NF calibration.
+ *
+ * Note: Only NF calibration due to
+ * channel change, not AGC calibration
+ * since AGC is still running !
+ */
+ if (fast) {
+ /*
+ * Release RF Bus grant
+ */
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
+ AR5K_PHY_RFBUS_REQ_REQUEST);
+
+ /*
+ * Start NF calibration
+ */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_NF);
+
+ return ret;
+ }
+
/*
* For 5210 we do all initialization using
* initvals, so we don't have to modify
* any settings (5210 also only supports
* a/aturbo modes)
*/
- if ((ah->ah_version != AR5K_AR5210) && !fast) {
+ if (ah->ah_version != AR5K_AR5210) {
/*
* Write initial RF gain settings
@@ -3276,22 +3352,6 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
if (ret)
return ret;
- /* Write OFDM timings on 5212*/
- if (ah->ah_version == AR5K_AR5212 &&
- channel->hw_value & CHANNEL_OFDM) {
-
- ret = ath5k_hw_write_ofdm_timings(ah, channel);
- if (ret)
- return ret;
-
- /* Spur info is available only from EEPROM versions
- * greater than 5.3, but the EEPROM routines will use
- * static values for older versions */
- if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
- ath5k_hw_set_spur_mitigation_filter(ah,
- channel);
- }
-
/*Enable/disable 802.11b mode on 5111
(enable 2111 frequency converter + CCK)*/
if (ah->ah_radio == AR5K_RF5111) {
@@ -3322,47 +3382,20 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
*/
ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
+ ath5k_hw_wait_for_synth(ah, channel);
+
/*
- * On 5211+ read activation -> rx delay
- * and use it.
+ * Perform ADC test to see if baseband is ready
+ * Set tx hold and check adc test register
*/
- if (ah->ah_version != AR5K_AR5210) {
- u32 delay;
- delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
- AR5K_PHY_RX_DELAY_M;
- delay = (channel->hw_value & CHANNEL_CCK) ?
- ((delay << 2) / 22) : (delay / 10);
- if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
- delay = delay << 1;
- if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
- delay = delay << 2;
- /* XXX: /2 on turbo ? Let's be safe
- * for now */
- udelay(100 + delay);
- } else {
- mdelay(1);
- }
-
- if (fast)
- /*
- * Release RF Bus grant
- */
- AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
- AR5K_PHY_RFBUS_REQ_REQUEST);
- else {
- /*
- * Perform ADC test to see if baseband is ready
- * Set tx hold and check adc test register
- */
- phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
- ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
- for (i = 0; i <= 20; i++) {
- if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
- break;
- udelay(200);
- }
- ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
+ phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
+ ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
+ for (i = 0; i <= 20; i++) {
+ if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
+ break;
+ udelay(200);
}
+ ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
/*
* Start automatic gain control calibration
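The ath5k_hw_wait_for_synth() helper added above converts the activation-to-RX delay register (100 ns steps) into a settle time in microseconds, with different scaling for CCK and OFDM and longer waits on half- and quarter-rate channels. A stand-alone sketch of only that arithmetic; the register value and channel parameters below are illustrative, not values read from hardware:

#include <stdio.h>

/* Stand-alone model of the delay computed by ath5k_hw_wait_for_synth() on
 * 5211+ parts; 'rx_delay' stands in for the masked AR5K_PHY_RX_DELAY
 * register value (units of 100 ns). */
static unsigned int synth_settle_us(unsigned int rx_delay, int is_cck,
				    int bw_mhz)
{
	unsigned int delay;

	/* CCK uses the (<< 2) / 22 conversion, OFDM divides by 10 */
	delay = is_cck ? ((rx_delay << 2) / 22) : (rx_delay / 10);

	if (bw_mhz == 10)	/* half rate: double the settle time */
		delay <<= 1;
	if (bw_mhz == 5)	/* quarter rate: quadruple the settle time */
		delay <<= 2;

	return 100 + delay;	/* 100 us base plus the scaled delay */
}

int main(void)
{
	printf("OFDM/20MHz, rx_delay=50 -> %u us\n", synth_settle_us(50, 0, 20));
	printf("CCK/20MHz,  rx_delay=50 -> %u us\n", synth_settle_us(50, 1, 20));
	printf("OFDM/5MHz,  rx_delay=50 -> %u us\n", synth_settle_us(50, 0, 5));
	return 0;
}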
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 23838e3..1a7fa6e 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -21,7 +21,6 @@
#include <linux/device.h>
#include <linux/leds.h>
#include <linux/completion.h>
-#include <linux/pm_qos_params.h>
#include "debug.h"
#include "common.h"
@@ -57,8 +56,6 @@ struct ath_node;
#define A_MAX(a, b) ((a) > (b) ? (a) : (b))
-#define ATH9K_PM_QOS_DEFAULT_VALUE 55
-
#define TSF_TO_TU(_h,_l) \
((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
@@ -633,8 +630,6 @@ struct ath_softc {
struct ath_descdma txsdma;
struct ath_ant_comb ant_comb;
-
- struct pm_qos_request_list pm_qos_req;
};
struct ath_wiphy {
@@ -666,7 +661,6 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
extern struct ieee80211_ops ath9k_ops;
extern int ath9k_modparam_nohwcrypt;
extern int led_blink;
-extern int ath9k_pm_qos_value;
extern bool is_ath9k_unloaded;
irqreturn_t ath_isr(int irq, void *dev);
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 5ab3084..07b1633 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -219,8 +219,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
struct tx_buf *tx_buf = NULL;
struct sk_buff *nskb = NULL;
int ret = 0, i;
- u16 *hdr, tx_skb_cnt = 0;
+ u16 tx_skb_cnt = 0;
u8 *buf;
+ __le16 *hdr;
if (hif_dev->tx.tx_skb_cnt == 0)
return 0;
@@ -245,9 +246,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
buf = tx_buf->buf;
buf += tx_buf->offset;
- hdr = (u16 *)buf;
- *hdr++ = nskb->len;
- *hdr++ = ATH_USB_TX_STREAM_MODE_TAG;
+ hdr = (__le16 *)buf;
+ *hdr++ = cpu_to_le16(nskb->len);
+ *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
buf += 4;
memcpy(buf, nskb->data, nskb->len);
tx_buf->len = nskb->len + 4;
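The __hif_usb_tx() change above makes the 4-byte stream-mode header explicitly little-endian via cpu_to_le16() instead of storing host-order u16 values. A minimal user-space sketch of writing such a header portably; the tag value is only a placeholder, not the driver's ATH_USB_TX_STREAM_MODE_TAG constant:

#include <stdint.h>
#include <stdio.h>

#define STREAM_MODE_TAG 0xabcd	/* placeholder tag, not the real constant */

/* Store a 16-bit value in little-endian byte order regardless of host
 * endianness, which is what the cpu_to_le16() conversions guarantee. */
static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

int main(void)
{
	uint8_t hdr[4];
	uint16_t skb_len = 1500;

	put_le16(hdr, skb_len);		/* frame length first */
	put_le16(hdr + 2, STREAM_MODE_TAG);	/* then the stream-mode tag */

	printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
	return 0;
}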
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 087a6a9..a033d01 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -41,10 +41,6 @@ static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
-int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE;
-module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH);
-MODULE_PARM_DESC(pmqos, "User specified PM-QOS value");
-
bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */
@@ -762,9 +758,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
ath_init_leds(sc);
ath_start_rfkill_poll(sc);
- pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
-
return 0;
error_world:
@@ -831,7 +824,6 @@ void ath9k_deinit_device(struct ath_softc *sc)
}
ieee80211_unregister_hw(hw);
- pm_qos_remove_request(&sc->pm_qos_req);
ath_rx_cleanup(sc);
ath_tx_cleanup(sc);
ath9k_deinit_softc(sc);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 180170d..2915b11 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -885,7 +885,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
struct ath_common *common = ath9k_hw_common(ah);
if (!(ints & ATH9K_INT_GLOBAL))
- ath9k_hw_enable_interrupts(ah);
+ ath9k_hw_disable_interrupts(ah);
ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
@@ -963,7 +963,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
}
- ath9k_hw_enable_interrupts(ah);
+ if (ints & ATH9K_INT_GLOBAL)
+ ath9k_hw_enable_interrupts(ah);
return;
}
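The ath9k_hw_set_interrupts() fix above drops the interrupt enable while the mask registers are rewritten and only re-enables at the end when ATH9K_INT_GLOBAL was actually requested. A toy sketch of that gating logic, with an illustrative flag value standing in for ATH9K_INT_GLOBAL:

#include <stdio.h>

#define INT_GLOBAL 0x1u	/* illustrative stand-in for ATH9K_INT_GLOBAL */

/* Model of the fixed gating: interrupts stay off while the mask is
 * reprogrammed and are restored only if the caller asked for them. */
static void set_interrupts(unsigned int ints)
{
	if (!(ints & INT_GLOBAL))
		printf("disable interrupts\n");

	printf("program interrupt mask 0x%x\n", ints);

	if (ints & INT_GLOBAL)
		printf("enable interrupts\n");
}

int main(void)
{
	set_interrupts(INT_GLOBAL | 0x10);	/* re-enabled at the end */
	set_interrupts(0x10);			/* stays disabled */
	return 0;
}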
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index da5c645..a09d15f 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1173,12 +1173,6 @@ static int ath9k_start(struct ieee80211_hw *hw)
ath9k_btcoex_timer_resume(sc);
}
- /* User has the option to provide pm-qos value as a module
- * parameter rather than using the default value of
- * 'ATH9K_PM_QOS_DEFAULT_VALUE'.
- */
- pm_qos_update_request(&sc->pm_qos_req, ath9k_pm_qos_value);
-
if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
common->bus_ops->extn_synch_en(common);
@@ -1345,8 +1339,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
sc->sc_flags |= SC_OP_INVALID;
- pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE);
-
mutex_unlock(&sc->mutex);
ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 537732e..f82c400 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -118,6 +118,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
{ USB_DEVICE(0x057c, 0x8402) },
/* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
{ USB_DEVICE(0x1668, 0x1200) },
+ /* Airlive X.USB a/b/g/n */
+ { USB_DEVICE(0x1b75, 0x9170) },
/* terminate */
{}
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index a9b852b..39b6f16 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -402,72 +402,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
}
#endif
-/**
- * iwl3945_good_plcp_health - checks for plcp error.
- *
- * When the plcp error is exceeding the thresholds, reset the radio
- * to improve the throughput.
- */
-static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt)
-{
- bool rc = true;
- struct iwl3945_notif_statistics current_stat;
- int combined_plcp_delta;
- unsigned int plcp_msec;
- unsigned long plcp_received_jiffies;
-
- if (priv->cfg->base_params->plcp_delta_threshold ==
- IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
- IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
- return rc;
- }
- memcpy(&current_stat, pkt->u.raw, sizeof(struct
- iwl3945_notif_statistics));
- /*
- * check for plcp_err and trigger radio reset if it exceeds
- * the plcp error threshold plcp_delta.
- */
- plcp_received_jiffies = jiffies;
- plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
- (long) priv->plcp_jiffies);
- priv->plcp_jiffies = plcp_received_jiffies;
- /*
- * check to make sure plcp_msec is not 0 to prevent division
- * by zero.
- */
- if (plcp_msec) {
- combined_plcp_delta =
- (le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
- le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
-
- if ((combined_plcp_delta > 0) &&
- ((combined_plcp_delta * 100) / plcp_msec) >
- priv->cfg->base_params->plcp_delta_threshold) {
- /*
- * if plcp_err exceed the threshold, the following
- * data is printed in csv format:
- * Text: plcp_err exceeded %d,
- * Received ofdm.plcp_err,
- * Current ofdm.plcp_err,
- * combined_plcp_delta,
- * plcp_msec
- */
- IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
- "%u, %d, %u mSecs\n",
- priv->cfg->base_params->plcp_delta_threshold,
- le32_to_cpu(current_stat.rx.ofdm.plcp_err),
- combined_plcp_delta, plcp_msec);
- /*
- * Reset the RF radio due to the high plcp
- * error rate
- */
- rc = false;
- }
- }
- return rc;
-}
-
void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
@@ -2734,7 +2668,6 @@ static struct iwl_lib_ops iwl3945_lib = {
.isr_ops = {
.isr = iwl_isr_legacy,
},
- .check_plcp_health = iwl3945_good_plcp_health,
.debugfs_ops = {
.rx_stats_read = iwl3945_ucode_rx_stats_read,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 79ab0a6..537fb8c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -51,7 +51,7 @@
#include "iwl-agn-debugfs.h"
/* Highest firmware API version supported */
-#define IWL5000_UCODE_API_MAX 2
+#define IWL5000_UCODE_API_MAX 5
#define IWL5150_UCODE_API_MAX 2
/* Lowest firmware API version supported */
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 1eacba4..0494d7b 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
while (i != idx) {
u16 len;
struct sk_buff *skb;
+ dma_addr_t dma_addr;
desc = &ring[i];
len = le16_to_cpu(desc->len);
skb = rx_buf[i];
@@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
len = priv->common.rx_mtu;
}
+ dma_addr = le32_to_cpu(desc->host_addr);
+ pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
+ priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
skb_put(skb, len);
if (p54_rx(dev, skb)) {
- pci_unmap_single(priv->pdev,
- le32_to_cpu(desc->host_addr),
- priv->common.rx_mtu + 32,
- PCI_DMA_FROMDEVICE);
+ pci_unmap_single(priv->pdev, dma_addr,
+ priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
rx_buf[i] = NULL;
- desc->host_addr = 0;
+ desc->host_addr = cpu_to_le32(0);
} else {
skb_trim(skb, 0);
+ pci_dma_sync_single_for_device(priv->pdev, dma_addr,
+ priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
}
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 21713a7..9b344a9 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -98,6 +98,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
{USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
{USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */
+ {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */
{USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
{USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */
{USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 848cc2c..518542b 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2597,6 +2597,9 @@ static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
__le32 mode;
int ret;
+ if (priv->device_type != RNDIS_BCM4320B)
+ return -ENOTSUPP;
+
netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__,
enabled ? "enabled" : "disabled",
timeout);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index aa97971..3b3f1e4 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -652,6 +652,12 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
*/
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+ /*
+ * The hardware has already checked the Michael MIC and has
+ * stripped it from the frame. Signal this to mac80211.
+ */
+ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
rxdesc->flags |= RX_FLAG_DECRYPTED;
else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
@@ -1065,6 +1071,8 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) },
#endif
#ifdef CONFIG_RT2800PCI_RT35XX
+ { PCI_DEVICE(0x1432, 0x7711), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1432, 0x7722), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index b97a4a5..197a36c 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -486,6 +486,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
*/
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+ /*
+ * The hardware has already checked the Michael MIC and has
+ * stripped it from the frame. Signal this to mac80211.
+ */
+ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
rxdesc->flags |= RX_FLAG_DECRYPTED;
else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index ffedfd4..ea15800 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig NFC_DEVICES
- bool "NFC devices"
+ bool "Near Field Communication (NFC) devices"
default n
---help---
You'll have to say Y if your computer contains an NFC device that
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
index bae6472..724f65d 100644
--- a/drivers/nfc/pn544.c
+++ b/drivers/nfc/pn544.c
@@ -60,7 +60,7 @@ enum pn544_irq {
struct pn544_info {
struct miscdevice miscdev;
struct i2c_client *i2c_dev;
- struct regulator_bulk_data regs[2];
+ struct regulator_bulk_data regs[3];
enum pn544_state state;
wait_queue_head_t read_wait;
@@ -74,6 +74,7 @@ struct pn544_info {
static const char reg_vdd_io[] = "Vdd_IO";
static const char reg_vbat[] = "VBat";
+static const char reg_vsim[] = "VSim";
/* sysfs interface */
static ssize_t pn544_test(struct device *dev,
@@ -740,6 +741,7 @@ static int __devinit pn544_probe(struct i2c_client *client,
info->regs[0].supply = reg_vdd_io;
info->regs[1].supply = reg_vbat;
+ info->regs[2].supply = reg_vsim;
r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
info->regs);
if (r < 0)
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index 28295d0..4d87b5d 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -36,19 +36,55 @@ unsigned int of_pdt_unique_id __initdata;
(p)->unique_id = of_pdt_unique_id++; \
} while (0)
-static inline const char *of_pdt_node_name(struct device_node *dp)
+static char * __init of_pdt_build_full_name(struct device_node *dp)
{
- return dp->path_component_name;
+ int len, ourlen, plen;
+ char *n;
+
+ dp->path_component_name = build_path_component(dp);
+
+ plen = strlen(dp->parent->full_name);
+ ourlen = strlen(dp->path_component_name);
+ len = ourlen + plen + 2;
+
+ n = prom_early_alloc(len);
+ strcpy(n, dp->parent->full_name);
+ if (!of_node_is_root(dp->parent)) {
+ strcpy(n + plen, "/");
+ plen++;
+ }
+ strcpy(n + plen, dp->path_component_name);
+
+ return n;
}
-#else
+#else /* CONFIG_SPARC */
static inline void of_pdt_incr_unique_id(void *p) { }
static inline void irq_trans_init(struct device_node *dp) { }
-static inline const char *of_pdt_node_name(struct device_node *dp)
+static char * __init of_pdt_build_full_name(struct device_node *dp)
{
- return dp->name;
+ static int failsafe_id = 0; /* for generating unique names on failure */
+ char *buf;
+ int len;
+
+ if (of_pdt_prom_ops->pkg2path(dp->phandle, NULL, 0, &len))
+ goto failsafe;
+
+ buf = prom_early_alloc(len + 1);
+ if (of_pdt_prom_ops->pkg2path(dp->phandle, buf, len, &len))
+ goto failsafe;
+ return buf;
+
+ failsafe:
+ buf = prom_early_alloc(strlen(dp->parent->full_name) +
+ strlen(dp->name) + 16);
+ sprintf(buf, "%s/%s@unknown%i",
+ of_node_is_root(dp->parent) ? "" : dp->parent->full_name,
+ dp->name, failsafe_id++);
+ pr_err("%s: pkg2path failed; assigning %s\n", __func__, buf);
+ return buf;
}
#endif /* !CONFIG_SPARC */
@@ -132,47 +168,6 @@ static char * __init of_pdt_get_one_property(phandle node, const char *name)
return buf;
}
-static char * __init of_pdt_try_pkg2path(phandle node)
-{
- char *res, *buf = NULL;
- int len;
-
- if (!of_pdt_prom_ops->pkg2path)
- return NULL;
-
- if (of_pdt_prom_ops->pkg2path(node, buf, 0, &len))
- return NULL;
- buf = prom_early_alloc(len + 1);
- if (of_pdt_prom_ops->pkg2path(node, buf, len, &len)) {
- pr_err("%s: package-to-path failed\n", __func__);
- return NULL;
- }
-
- res = strrchr(buf, '/');
- if (!res) {
- pr_err("%s: couldn't find / in %s\n", __func__, buf);
- return NULL;
- }
- return res+1;
-}
-
-/*
- * When fetching the node's name, first try using package-to-path; if
- * that fails (either because the arch hasn't supplied a PROM callback,
- * or some other random failure), fall back to just looking at the node's
- * 'name' property.
- */
-static char * __init of_pdt_build_name(phandle node)
-{
- char *buf;
-
- buf = of_pdt_try_pkg2path(node);
- if (!buf)
- buf = of_pdt_get_one_property(node, "name");
-
- return buf;
-}
-
static struct device_node * __init of_pdt_create_node(phandle node,
struct device_node *parent)
{
@@ -187,7 +182,7 @@ static struct device_node * __init of_pdt_create_node(phandle node,
kref_init(&dp->kref);
- dp->name = of_pdt_build_name(node);
+ dp->name = of_pdt_get_one_property(node, "name");
dp->type = of_pdt_get_one_property(node, "device_type");
dp->phandle = node;
@@ -198,26 +193,6 @@ static struct device_node * __init of_pdt_create_node(phandle node,
return dp;
}
-static char * __init of_pdt_build_full_name(struct device_node *dp)
-{
- int len, ourlen, plen;
- char *n;
-
- plen = strlen(dp->parent->full_name);
- ourlen = strlen(of_pdt_node_name(dp));
- len = ourlen + plen + 2;
-
- n = prom_early_alloc(len);
- strcpy(n, dp->parent->full_name);
- if (!of_node_is_root(dp->parent)) {
- strcpy(n + plen, "/");
- plen++;
- }
- strcpy(n + plen, of_pdt_node_name(dp));
-
- return n;
-}
-
static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
phandle node,
struct device_node ***nextp)
@@ -240,9 +215,6 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
*(*nextp) = dp;
*nextp = &dp->allnext;
-#if defined(CONFIG_SPARC)
- dp->path_component_name = build_path_component(dp);
-#endif
dp->full_name = of_pdt_build_full_name(dp);
dp->child = of_pdt_build_tree(dp,
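The reworked of_pdt_build_full_name() above concatenates the parent's full_name, a separating '/', and the node's path component, skipping the extra slash when the parent is the root node. A rough user-space model of that concatenation; prom_early_alloc() is replaced by malloc() and the sample paths are invented:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Join a parent's full path and this node's path component, avoiding a
 * double slash when the parent is the root ("/"). */
static char *build_full_name(const char *parent_full, const char *component)
{
	int root = (strcmp(parent_full, "/") == 0);
	size_t len = strlen(parent_full) + strlen(component) + 2;
	char *n = malloc(len);

	if (!n)
		return NULL;
	snprintf(n, len, "%s/%s", root ? "" : parent_full, component);
	return n;
}

int main(void)
{
	char *name = build_full_name("/pci@1f,0", "ide@d");

	if (!name)
		return 1;
	printf("%s\n", name);	/* /pci@1f,0/ide@d */
	free(name);
	return 0;
}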
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 0bdda5b..42fbf1a 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -518,6 +518,8 @@ int pcmcia_enable_device(struct pcmcia_device *p_dev)
flags |= CONF_ENABLE_IOCARD;
if (flags & CONF_ENABLE_IOCARD)
s->socket.flags |= SS_IOCARD;
+ if (flags & CONF_ENABLE_ZVCARD)
+ s->socket.flags |= SS_ZVCARD | SS_IOCARD;
if (flags & CONF_ENABLE_SPKR) {
s->socket.flags |= SS_SPKR_ENA;
status = CCSR_AUDIO_ENA;
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 3755e7c..2c54054 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -215,7 +215,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
}
#endif
-static void pxa2xx_configure_sockets(struct device *dev)
+void pxa2xx_configure_sockets(struct device *dev)
{
struct pcmcia_low_level *ops = dev->platform_data;
/*
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h
index bb62ea8..b609b45 100644
--- a/drivers/pcmcia/pxa2xx_base.h
+++ b/drivers/pcmcia/pxa2xx_base.h
@@ -1,3 +1,4 @@
int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
+void pxa2xx_configure_sockets(struct device *dev);
diff --git a/drivers/pcmcia/pxa2xx_colibri.c b/drivers/pcmcia/pxa2xx_colibri.c
index c3f7219..a520395 100644
--- a/drivers/pcmcia/pxa2xx_colibri.c
+++ b/drivers/pcmcia/pxa2xx_colibri.c
@@ -181,6 +181,9 @@ static int __init colibri_pcmcia_init(void)
{
int ret;
+ if (!machine_is_colibri() && !machine_is_colibri320())
+ return -ENODEV;
+
colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
if (!colibri_pcmcia_device)
return -ENOMEM;
diff --git a/drivers/pcmcia/pxa2xx_lubbock.c b/drivers/pcmcia/pxa2xx_lubbock.c
index b9f8c8f..25afe63 100644
--- a/drivers/pcmcia/pxa2xx_lubbock.c
+++ b/drivers/pcmcia/pxa2xx_lubbock.c
@@ -226,6 +226,7 @@ int pcmcia_lubbock_init(struct sa1111_dev *sadev)
lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
+ pxa2xx_configure_sockets(&sadev->dev);
ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
pxa2xx_drv_pcmcia_add_one);
}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index d163bc2..a59af5b 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -227,7 +227,7 @@ config SONYPI_COMPAT
config IDEAPAD_LAPTOP
tristate "Lenovo IdeaPad Laptop Extras"
depends on ACPI
- depends on RFKILL
+ depends on RFKILL && INPUT
select INPUT_SPARSEKMAP
help
This is a driver for the rfkill switches on Lenovo IdeaPad netbooks.
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index c5c4b8c..38b34a7 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -84,7 +84,7 @@ MODULE_LICENSE("GPL");
*/
#define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB"
#define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C"
-#define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3"
+#define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"
#define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A"
#define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531"
@@ -1280,7 +1280,7 @@ static ssize_t set_bool_threeg(struct device *dev,
return -EINVAL;
return count;
}
-static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg,
+static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
set_bool_threeg);
static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index 4633fd8..fe49593 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1081,14 +1081,8 @@ static int asus_hotk_add_fs(struct acpi_device *device)
struct proc_dir_entry *proc;
mode_t mode;
- /*
- * If parameter uid or gid is not changed, keep the default setting for
- * our proc entries (-rw-rw-rw-) else, it means we care about security,
- * and then set to -rw-rw----
- */
-
if ((asus_uid == 0) && (asus_gid == 0)) {
- mode = S_IFREG | S_IRUGO | S_IWUGO;
+ mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
} else {
mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
printk(KERN_WARNING " asus_uid and asus_gid parameters are "
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 34657f9..ad24ef3 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -290,9 +290,12 @@ static int dell_rfkill_set(void *data, bool blocked)
dell_send_request(buffer, 17, 11);
/* If the hardware switch controls this radio, and the hardware
- switch is disabled, don't allow changing the software state */
+ switch is disabled, don't allow changing the software state.
+ If the hardware switch is reported as not supported, always
+ fire the SMI to toggle the killswitch. */
if ((hwswitch_state & BIT(hwswitch_bit)) &&
- !(buffer->output[1] & BIT(16))) {
+ !(buffer->output[1] & BIT(16)) &&
+ (buffer->output[1] & BIT(0))) {
ret = -EINVAL;
goto out;
}
@@ -398,6 +401,23 @@ static const struct file_operations dell_debugfs_fops = {
static void dell_update_rfkill(struct work_struct *ignored)
{
+ int status;
+
+ get_buffer();
+ dell_send_request(buffer, 17, 11);
+ status = buffer->output[1];
+ release_buffer();
+
+ /* if hardware rfkill is not supported, set it explicitly */
+ if (!(status & BIT(0))) {
+ if (wifi_rfkill)
+ dell_rfkill_set((void *)1, !((status & BIT(17)) >> 17));
+ if (bluetooth_rfkill)
+ dell_rfkill_set((void *)2, !((status & BIT(18)) >> 18));
+ if (wwan_rfkill)
+ dell_rfkill_set((void *)3, !((status & BIT(19)) >> 19));
+ }
+
if (wifi_rfkill)
dell_rfkill_query(wifi_rfkill, (void *)1);
if (bluetooth_rfkill)
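The dell_update_rfkill() addition above reads the SMI status word and, when bit 0 reports that no hardware kill switch is present, forces each software rfkill state from bits 17-19. A small sketch of that decode; the bit layout is taken from the hunk and the helper name is illustrative:

#include <stdio.h>

/* Decode the status word as the new dell_update_rfkill() path does when
 * the firmware reports no hardware kill switch (bit 0 clear): bits 17..19
 * carry the wifi/bluetooth/wwan states, and a radio is soft-blocked when
 * its bit is clear. */
static void update_rfkill(unsigned int status)
{
	if (status & 1) {
		printf("hardware switch present, nothing to force\n");
		return;
	}
	printf("wifi      blocked=%d\n", !((status >> 17) & 1));
	printf("bluetooth blocked=%d\n", !((status >> 18) & 1));
	printf("wwan      blocked=%d\n", !((status >> 19) & 1));
}

int main(void)
{
	update_rfkill(1u << 17);	/* no hw switch, only wifi enabled */
	return 0;
}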
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 930e627..61433d4 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -60,69 +60,20 @@ enum pmic_gpio_register {
#define GPOSW_DOU 0x08
#define GPOSW_RDRV 0x30
+#define GPIO_UPDATE_TYPE 0x80000000
#define NUM_GPIO 24
-struct pmic_gpio_irq {
- spinlock_t lock;
- u32 trigger[NUM_GPIO];
- u32 dirty;
- struct work_struct work;
-};
-
-
struct pmic_gpio {
+ struct mutex buslock;
struct gpio_chip chip;
- struct pmic_gpio_irq irqtypes;
void *gpiointr;
int irq;
unsigned irq_base;
+ unsigned int update_type;
+ u32 trigger_type;
};
-static void pmic_program_irqtype(int gpio, int type)
-{
- if (type & IRQ_TYPE_EDGE_RISING)
- intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20);
- else
- intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20);
-
- if (type & IRQ_TYPE_EDGE_FALLING)
- intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10);
- else
- intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10);
-};
-
-static void pmic_irqtype_work(struct work_struct *work)
-{
- struct pmic_gpio_irq *t =
- container_of(work, struct pmic_gpio_irq, work);
- unsigned long flags;
- int i;
- u16 type;
-
- spin_lock_irqsave(&t->lock, flags);
- /* As we drop the lock, we may need multiple scans if we race the
- pmic_irq_type function */
- while (t->dirty) {
- /*
- * For each pin that has the dirty bit set send an IPC
- * message to configure the hardware via the PMIC
- */
- for (i = 0; i < NUM_GPIO; i++) {
- if (!(t->dirty & (1 << i)))
- continue;
- t->dirty &= ~(1 << i);
- /* We can't trust the array entry or dirty
- once the lock is dropped */
- type = t->trigger[i];
- spin_unlock_irqrestore(&t->lock, flags);
- pmic_program_irqtype(i, type);
- spin_lock_irqsave(&t->lock, flags);
- }
- }
- spin_unlock_irqrestore(&t->lock, flags);
-}
-
static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
if (offset > 8) {
@@ -190,25 +141,24 @@ static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
1 << (offset - 16));
}
-static int pmic_irq_type(unsigned irq, unsigned type)
+/*
+ * This is called from genirq with pg->buslock locked and
+ * irq_desc->lock held. We cannot access the scu bus here, so we
+ * store the change and update in the bus_sync_unlock() function below
+ */
+static int pmic_irq_type(struct irq_data *data, unsigned type)
{
- struct pmic_gpio *pg = get_irq_chip_data(irq);
- u32 gpio = irq - pg->irq_base;
- unsigned long flags;
+ struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
+ u32 gpio = data->irq - pg->irq_base;
if (gpio >= pg->chip.ngpio)
return -EINVAL;
- spin_lock_irqsave(&pg->irqtypes.lock, flags);
- pg->irqtypes.trigger[gpio] = type;
- pg->irqtypes.dirty |= (1 << gpio);
- spin_unlock_irqrestore(&pg->irqtypes.lock, flags);
- schedule_work(&pg->irqtypes.work);
+ pg->trigger_type = type;
+ pg->update_type = gpio | GPIO_UPDATE_TYPE;
return 0;
}
-
-
static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip);
@@ -217,38 +167,32 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
}
/* the gpiointr register is read-clear, so just do nothing. */
-static void pmic_irq_unmask(unsigned irq)
-{
-};
+static void pmic_irq_unmask(struct irq_data *data) { }
-static void pmic_irq_mask(unsigned irq)
-{
-};
+static void pmic_irq_mask(struct irq_data *data) { }
static struct irq_chip pmic_irqchip = {
.name = "PMIC-GPIO",
- .mask = pmic_irq_mask,
- .unmask = pmic_irq_unmask,
- .set_type = pmic_irq_type,
+ .irq_mask = pmic_irq_mask,
+ .irq_unmask = pmic_irq_unmask,
+ .irq_set_type = pmic_irq_type,
};
-static void pmic_irq_handler(unsigned irq, struct irq_desc *desc)
+static irqreturn_t pmic_irq_handler(int irq, void *data)
{
- struct pmic_gpio *pg = (struct pmic_gpio *)get_irq_data(irq);
+ struct pmic_gpio *pg = data;
u8 intsts = *((u8 *)pg->gpiointr + 4);
int gpio;
+ irqreturn_t ret = IRQ_NONE;
for (gpio = 0; gpio < 8; gpio++) {
if (intsts & (1 << gpio)) {
pr_debug("pmic pin %d triggered\n", gpio);
generic_handle_irq(pg->irq_base + gpio);
+ ret = IRQ_HANDLED;
}
}
-
- if (desc->chip->irq_eoi)
- desc->chip->irq_eoi(irq_get_irq_data(irq));
- else
- dev_warn(pg->chip.dev, "missing EOI handler for irq %d\n", irq);
+ return ret;
}
static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
@@ -297,8 +241,7 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
pg->chip.can_sleep = 1;
pg->chip.dev = dev;
- INIT_WORK(&pg->irqtypes.work, pmic_irqtype_work);
- spin_lock_init(&pg->irqtypes.lock);
+ mutex_init(&pg->buslock);
pg->chip.dev = dev;
retval = gpiochip_add(&pg->chip);
@@ -306,8 +249,13 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__);
goto err;
}
- set_irq_data(pg->irq, pg);
- set_irq_chained_handler(pg->irq, pmic_irq_handler);
+
+ retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg);
+ if (retval) {
+ printk(KERN_WARNING "pmic: Interrupt request failed\n");
+ goto err;
+ }
+
for (i = 0; i < 8; i++) {
set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip,
handle_simple_irq, "demux");
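The comment introduced with pmic_irq_type() explains that the SCU bus cannot be touched while genirq holds its locks, so the new code only records the requested trigger and GPIO (tagged with GPIO_UPDATE_TYPE) and leaves the register write to a later bus-sync step, which is not part of this hunk. A toy user-space model of that record-now, program-later pattern (structure and helpers are invented for illustration):

#include <stdio.h>

#define UPDATE_TYPE 0x80000000u	/* mirrors GPIO_UPDATE_TYPE in the hunk */

struct pmic_state {
	unsigned int update_type;	/* gpio number | UPDATE_TYPE when dirty */
	unsigned int trigger_type;
};

/* Runs with locks held: only record what must change, never touch the bus. */
static void set_type(struct pmic_state *pg, unsigned int gpio, unsigned int type)
{
	pg->trigger_type = type;
	pg->update_type = gpio | UPDATE_TYPE;
}

/* Runs later, once the bus may be accessed: apply the recorded change. */
static void bus_sync_unlock(struct pmic_state *pg)
{
	if (pg->update_type & UPDATE_TYPE) {
		unsigned int gpio = pg->update_type & ~UPDATE_TYPE;

		printf("program gpio %u trigger 0x%x\n", gpio, pg->trigger_type);
		pg->update_type = 0;
	}
}

int main(void)
{
	struct pmic_state pg = { 0, 0 };

	set_type(&pg, 3, 0x3);	/* trigger value illustrative */
	bus_sync_unlock(&pg);
	return 0;
}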
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 1fe0f1f..865ef78 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -162,7 +162,7 @@ set_bool_##value(struct device *dev, struct device_attribute *attr, \
return -EINVAL; \
return count; \
} \
-static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \
+static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \
show_bool_##value, set_bool_##value);
show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index dd59958..eb99223 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2275,16 +2275,12 @@ static void tpacpi_input_send_key(const unsigned int scancode)
if (keycode != KEY_RESERVED) {
mutex_lock(&tpacpi_inputdev_send_mutex);
+ input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
input_report_key(tpacpi_inputdev, keycode, 1);
- if (keycode == KEY_UNKNOWN)
- input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
- scancode);
input_sync(tpacpi_inputdev);
+ input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
input_report_key(tpacpi_inputdev, keycode, 0);
- if (keycode == KEY_UNKNOWN)
- input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
- scancode);
input_sync(tpacpi_inputdev);
mutex_unlock(&tpacpi_inputdev_send_mutex);
diff --git a/drivers/pps/generators/Kconfig b/drivers/pps/generators/Kconfig
index f3a73dd..e4c4f3d 100644
--- a/drivers/pps/generators/Kconfig
+++ b/drivers/pps/generators/Kconfig
@@ -6,7 +6,7 @@ comment "PPS generators support"
config PPS_GENERATOR_PARPORT
tristate "Parallel port PPS signal generator"
- depends on PARPORT
+ depends on PARPORT && BROKEN
help
If you say yes here you get support for a PPS signal generator which
utilizes STROBE pin of a parallel port to send PPS signals. It uses
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index cba1b43..a4e8eb9 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -168,7 +168,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
{
unsigned long flags;
int captured = 0;
- struct pps_ktime ts_real;
+ struct pps_ktime ts_real = { .sec = 0, .nsec = 0, .flags = 0 };
/* check event type */
BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 76b4185..1269fbd 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -77,9 +77,9 @@ rio_read_config(struct file *filp, struct kobject *kobj,
/* Several chips lock up trying to read undefined config space */
if (capable(CAP_SYS_ADMIN))
- size = 0x200000;
+ size = RIO_MAINT_SPACE_SZ;
- if (off > size)
+ if (off >= size)
return 0;
if (off + count > size) {
size -= off;
@@ -147,10 +147,10 @@ rio_write_config(struct file *filp, struct kobject *kobj,
loff_t init_off = off;
u8 *data = (u8 *) buf;
- if (off > 0x200000)
+ if (off >= RIO_MAINT_SPACE_SZ)
return 0;
- if (off + count > 0x200000) {
- size = 0x200000 - off;
+ if (off + count > RIO_MAINT_SPACE_SZ) {
+ size = RIO_MAINT_SPACE_SZ - off;
count = size;
}
@@ -200,7 +200,7 @@ static struct bin_attribute rio_config_attr = {
.name = "config",
.mode = S_IRUGO | S_IWUSR,
},
- .size = 0x200000,
+ .size = RIO_MAINT_SPACE_SZ,
.read = rio_read_config,
.write = rio_write_config,
};
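The rio-sysfs hunks above replace the hard-coded 0x200000 with RIO_MAINT_SPACE_SZ and tighten the bound from 'off > size' to 'off >= size'. A tiny sketch of the resulting window clamping; the size constant here just stands in for RIO_MAINT_SPACE_SZ:

#include <stdio.h>
#include <stddef.h>

#define MAINT_SPACE_SZ 0x200000	/* stand-in for RIO_MAINT_SPACE_SZ */

/* Clamp a (offset, count) request against the maintenance space size the
 * same way the fixed rio_read_config()/rio_write_config() paths do. */
static size_t clamp_window(size_t off, size_t count)
{
	if (off >= MAINT_SPACE_SZ)	/* at or past the end: nothing left */
		return 0;
	if (off + count > MAINT_SPACE_SZ)
		count = MAINT_SPACE_SZ - off;
	return count;
}

int main(void)
{
	printf("%zu\n", clamp_window(0x1ffff0, 0x40));	/* trimmed to 0x10 */
	printf("%zu\n", clamp_window(0x200000, 0x40));	/* 0: offset at end */
	return 0;
}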
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index f53d31b..2bb5de1 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
- BUG_ON(val < 0 || val > mc13xxx_regulators[id].desc.n_voltages);
+ BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages);
return mc13xxx_regulators[id].voltages[val];
}
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 8b0d2c4..06df898 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -120,6 +120,7 @@ static unsigned int wm831x_dcdc_get_mode(struct regulator_dev *rdev)
return REGULATOR_MODE_IDLE;
default:
BUG();
+ return -EINVAL;
}
}
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index cdd9719..4941cad 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -97,6 +97,18 @@ config RTC_INTF_DEV
If unsure, say Y.
+config RTC_INTF_DEV_UIE_EMUL
+ bool "RTC UIE emulation on dev interface"
+ depends on RTC_INTF_DEV
+ help
+ Provides an emulation for RTC_UIE if the underlying rtc chip
+ driver does not expose RTC_UIE ioctls. Those requests generate
+ once-per-second update interrupts, used for synchronization.
+
+ The emulation code will read the time from the hardware
+ clock several times per second; please enable this option
+ only if you know that you really need it.
+
config RTC_DRV_TEST
tristate "Test driver/device"
help
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index a0c0196..cb2f072 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -209,9 +209,8 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
}
if (err)
- return err;
-
- if (!rtc->ops)
+ /* nothing */;
+ else if (!rtc->ops)
err = -ENODEV;
else if (!rtc->ops->alarm_irq_enable)
err = -EINVAL;
@@ -229,6 +228,12 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
if (err)
return err;
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+ if (enabled == 0 && rtc->uie_irq_active) {
+ mutex_unlock(&rtc->ops_lock);
+ return rtc_dev_update_irq_enable_emul(rtc, 0);
+ }
+#endif
/* make sure we're changing state */
if (rtc->uie_rtctimer.enabled == enabled)
goto out;
@@ -248,6 +253,16 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
out:
mutex_unlock(&rtc->ops_lock);
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+ /*
+ * Enable emulation if the driver did not provide
+ * the update_irq_enable function pointer or if it returned
+ * -EINVAL to signal that it has been configured without
+ * interrupts or that they are not available at the moment.
+ */
+ if (err == -EINVAL)
+ err = rtc_dev_update_irq_enable_emul(rtc, enabled);
+#endif
return err;
}
@@ -263,7 +278,7 @@ EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
*
* Triggers the registered irq_task function callback.
*/
-static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
+void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
{
unsigned long flags;
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index c36749e..5469c52 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -309,7 +309,7 @@ static const struct rtc_class_ops at91_rtc_ops = {
.read_alarm = at91_rtc_readalarm,
.set_alarm = at91_rtc_setalarm,
.proc = at91_rtc_proc,
- .alarm_irq_enabled = at91_rtc_alarm_irq_enable,
+ .alarm_irq_enable = at91_rtc_alarm_irq_enable,
};
/*
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 37c3cc1..d0e06ed 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -46,6 +46,105 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
return err;
}
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+/*
+ * Routine to poll RTC seconds field for change as often as possible,
+ * after first RTC_UIE use timer to reduce polling
+ */
+static void rtc_uie_task(struct work_struct *work)
+{
+ struct rtc_device *rtc =
+ container_of(work, struct rtc_device, uie_task);
+ struct rtc_time tm;
+ int num = 0;
+ int err;
+
+ err = rtc_read_time(rtc, &tm);
+
+ spin_lock_irq(&rtc->irq_lock);
+ if (rtc->stop_uie_polling || err) {
+ rtc->uie_task_active = 0;
+ } else if (rtc->oldsecs != tm.tm_sec) {
+ num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
+ rtc->oldsecs = tm.tm_sec;
+ rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
+ rtc->uie_timer_active = 1;
+ rtc->uie_task_active = 0;
+ add_timer(&rtc->uie_timer);
+ } else if (schedule_work(&rtc->uie_task) == 0) {
+ rtc->uie_task_active = 0;
+ }
+ spin_unlock_irq(&rtc->irq_lock);
+ if (num)
+ rtc_handle_legacy_irq(rtc, num, RTC_UF);
+}
+static void rtc_uie_timer(unsigned long data)
+{
+ struct rtc_device *rtc = (struct rtc_device *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc->irq_lock, flags);
+ rtc->uie_timer_active = 0;
+ rtc->uie_task_active = 1;
+ if ((schedule_work(&rtc->uie_task) == 0))
+ rtc->uie_task_active = 0;
+ spin_unlock_irqrestore(&rtc->irq_lock, flags);
+}
+
+static int clear_uie(struct rtc_device *rtc)
+{
+ spin_lock_irq(&rtc->irq_lock);
+ if (rtc->uie_irq_active) {
+ rtc->stop_uie_polling = 1;
+ if (rtc->uie_timer_active) {
+ spin_unlock_irq(&rtc->irq_lock);
+ del_timer_sync(&rtc->uie_timer);
+ spin_lock_irq(&rtc->irq_lock);
+ rtc->uie_timer_active = 0;
+ }
+ if (rtc->uie_task_active) {
+ spin_unlock_irq(&rtc->irq_lock);
+ flush_scheduled_work();
+ spin_lock_irq(&rtc->irq_lock);
+ }
+ rtc->uie_irq_active = 0;
+ }
+ spin_unlock_irq(&rtc->irq_lock);
+ return 0;
+}
+
+static int set_uie(struct rtc_device *rtc)
+{
+ struct rtc_time tm;
+ int err;
+
+ err = rtc_read_time(rtc, &tm);
+ if (err)
+ return err;
+ spin_lock_irq(&rtc->irq_lock);
+ if (!rtc->uie_irq_active) {
+ rtc->uie_irq_active = 1;
+ rtc->stop_uie_polling = 0;
+ rtc->oldsecs = tm.tm_sec;
+ rtc->uie_task_active = 1;
+ if (schedule_work(&rtc->uie_task) == 0)
+ rtc->uie_task_active = 0;
+ }
+ rtc->irq_data = 0;
+ spin_unlock_irq(&rtc->irq_lock);
+ return 0;
+}
+
+int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled)
+{
+ if (enabled)
+ return set_uie(rtc);
+ else
+ return clear_uie(rtc);
+}
+EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul);
+
+#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */
static ssize_t
rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
@@ -387,6 +486,11 @@ void rtc_dev_prepare(struct rtc_device *rtc)
rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+ INIT_WORK(&rtc->uie_task, rtc_uie_task);
+ setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
+#endif
+
cdev_init(&rtc->char_dev, &rtc_dev_fops);
rtc->char_dev.owner = rtc->owner;
}
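rtc_uie_task() above emulates the update interrupt by polling the RTC seconds field and reporting how many seconds elapsed since the previous poll, modulo 60. A stand-alone sketch of that counting step with made-up sample values:

#include <stdio.h>

/* Model of how rtc_uie_task() counts the seconds that elapsed between two
 * polls of the RTC seconds field (both values are in 0..59). */
static int seconds_elapsed(int oldsecs, int newsecs)
{
	if (newsecs == oldsecs)
		return 0;	/* no tick seen yet, keep polling */
	return (newsecs + 60 - oldsecs) % 60;
}

int main(void)
{
	printf("%d\n", seconds_elapsed(58, 59));	/* 1 */
	printf("%d\n", seconds_elapsed(59, 2));		/* 3: wrapped past :00 */
	printf("%d\n", seconds_elapsed(30, 30));	/* 0 */
	return 0;
}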
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 23a9ee1..9507354 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -1,7 +1,7 @@
/*
* RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
*
- * Copyright (C) 2009-2010 Freescale Semiconductor.
+ * Copyright (C) 2009-2011 Freescale Semiconductor.
* Author: Jack Lan <jack.lan@freescale.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -141,9 +141,11 @@ static int ds3232_read_time(struct device *dev, struct rtc_time *time)
time->tm_hour = bcd2bin(hour);
}
- time->tm_wday = bcd2bin(week);
+ /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */
+ time->tm_wday = bcd2bin(week) - 1;
time->tm_mday = bcd2bin(day);
- time->tm_mon = bcd2bin(month & 0x7F);
+ /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */
+ time->tm_mon = bcd2bin(month & 0x7F) - 1;
if (century)
add_century = 100;
@@ -162,9 +164,11 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time)
buf[0] = bin2bcd(time->tm_sec);
buf[1] = bin2bcd(time->tm_min);
buf[2] = bin2bcd(time->tm_hour);
- buf[3] = bin2bcd(time->tm_wday); /* Day of the week */
+ /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */
+ buf[3] = bin2bcd(time->tm_wday + 1);
buf[4] = bin2bcd(time->tm_mday); /* Date */
- buf[5] = bin2bcd(time->tm_mon);
+ /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */
+ buf[5] = bin2bcd(time->tm_mon + 1);
if (time->tm_year >= 100) {
buf[5] |= 0x80;
buf[6] = bin2bcd(time->tm_year - 100);
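The ds3232 fix above adjusts between the chip's 1-7 weekday / 1-12 month encoding and the 0-6 / 0-11 ranges used by struct rtc_time, on top of the usual BCD conversion. A small user-space example of the same round trip, with bin2bcd()/bcd2bin() reimplemented locally:

#include <stdio.h>

/* Local equivalents of the kernel's bin2bcd()/bcd2bin() helpers. */
static unsigned char bin2bcd(unsigned char v) { return ((v / 10) << 4) | (v % 10); }
static unsigned char bcd2bin(unsigned char v) { return ((v >> 4) * 10) + (v & 0x0f); }

int main(void)
{
	int tm_wday = 0;	/* Sunday in struct tm/rtc_time terms: 0..6 */
	int tm_mon  = 0;	/* January in struct tm/rtc_time terms: 0..11 */

	/* Chip registers use 1..7 and 1..12, so add one before encoding... */
	unsigned char reg_wday = bin2bcd(tm_wday + 1);
	unsigned char reg_mon  = bin2bcd(tm_mon + 1);

	/* ...and subtract one again when decoding, as the fixed driver does. */
	printf("wday reg=0x%02x -> tm_wday=%d\n", reg_wday, bcd2bin(reg_wday) - 1);
	printf("mon  reg=0x%02x -> tm_mon=%d\n", reg_mon, bcd2bin(reg_mon & 0x7f) - 1);
	return 0;
}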
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index cf953ec..b80fa28 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -77,18 +77,20 @@ static irqreturn_t s3c_rtc_tickirq(int irq, void *id)
}
/* Update control registers */
-static void s3c_rtc_setaie(int to)
+static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
{
unsigned int tmp;
- pr_debug("%s: aie=%d\n", __func__, to);
+ pr_debug("%s: aie=%d\n", __func__, enabled);
tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
- if (to)
+ if (enabled)
tmp |= S3C2410_RTCALM_ALMEN;
writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
+
+ return 0;
}
static int s3c_rtc_setpie(struct device *dev, int enabled)
@@ -308,7 +310,7 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
writeb(alrm_en, base + S3C2410_RTCALM);
- s3c_rtc_setaie(alrm->enabled);
+ s3c_rtc_setaie(dev, alrm->enabled);
return 0;
}
@@ -440,7 +442,7 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
rtc_device_unregister(rtc);
s3c_rtc_setpie(&dev->dev, 0);
- s3c_rtc_setaie(0);
+ s3c_rtc_setaie(&dev->dev, 0);
clk_disable(rtc_clk);
clk_put(rtc_clk);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 318672d..a9fe23d 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -72,7 +72,7 @@ static struct dasd_discipline dasd_eckd_discipline;
static struct ccw_device_id dasd_eckd_ids[] = {
{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
- { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
+ { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index c881a14..1f6a4d8 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -62,8 +62,8 @@ static int xpram_devs;
/*
* Parameter parsing functions.
*/
-static int __initdata devs = XPRAM_DEVS;
-static char __initdata *sizes[XPRAM_MAX_DEVS];
+static int devs = XPRAM_DEVS;
+static char *sizes[XPRAM_MAX_DEVS];
module_param(devs, int, 0);
module_param_array(sizes, charp, NULL, 0);
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 8cd58e4..5ad44da 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -460,7 +460,8 @@ kbd_ioctl(struct kbd_data *kbd, struct file *file,
unsigned int cmd, unsigned long arg)
{
void __user *argp;
- int ct, perm;
+ unsigned int ct;
+ int perm;
argp = (void __user *)arg;
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 7a242f0..267b54e 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -280,6 +280,14 @@ tape_do_io_free(struct tape_device *device, struct tape_request *request)
return rc;
}
+static inline void
+tape_do_io_async_free(struct tape_device *device, struct tape_request *request)
+{
+ request->callback = (void *) tape_free_request;
+ request->callback_data = NULL;
+ tape_do_io_async(device, request);
+}
+
extern int tape_oper_handler(int irq, int status);
extern void tape_noper_handler(int irq, int status);
extern int tape_open(struct tape_device *);
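tape_do_io_async_free() added above starts a channel program without waiting and points the completion callback at tape_free_request(), so the request frees itself once the asynchronous I/O finishes. A toy user-space model of that fire-and-forget ownership hand-off; the structure and helpers are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct request {
	void (*callback)(struct request *);
};

/* Completion callback: the request owns itself and frees itself here. */
static void free_request(struct request *req)
{
	printf("request %p freed from completion callback\n", (void *)req);
	free(req);
}

/* In the driver the channel program completes later; here the completion
 * path is invoked immediately just to show the flow. */
static void do_io_async(struct request *req)
{
	req->callback(req);
}

int main(void)
{
	struct request *req = calloc(1, sizeof(*req));

	if (!req)
		return 1;
	req->callback = free_request;
	do_io_async(req);	/* caller never blocks and never frees req */
	return 0;
}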
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index c17f35b..c265111 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -53,23 +53,11 @@ static void tape_34xx_delete_sbid_from(struct tape_device *, int);
* Medium sense for 34xx tapes. There is no 'real' medium sense call.
* So we just do a normal sense.
*/
-static int
-tape_34xx_medium_sense(struct tape_device *device)
+static void __tape_34xx_medium_sense(struct tape_request *request)
{
- struct tape_request *request;
- unsigned char *sense;
- int rc;
-
- request = tape_alloc_request(1, 32);
- if (IS_ERR(request)) {
- DBF_EXCEPTION(6, "MSEN fail\n");
- return PTR_ERR(request);
- }
-
- request->op = TO_MSEN;
- tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+ struct tape_device *device = request->device;
+ unsigned char *sense;
- rc = tape_do_io_interruptible(device, request);
if (request->rc == 0) {
sense = request->cpdata;
@@ -88,15 +76,47 @@ tape_34xx_medium_sense(struct tape_device *device)
device->tape_generic_status |= GMT_WR_PROT(~0);
else
device->tape_generic_status &= ~GMT_WR_PROT(~0);
- } else {
+ } else
DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n",
request->rc);
- }
tape_free_request(request);
+}
+
+static int tape_34xx_medium_sense(struct tape_device *device)
+{
+ struct tape_request *request;
+ int rc;
+
+ request = tape_alloc_request(1, 32);
+ if (IS_ERR(request)) {
+ DBF_EXCEPTION(6, "MSEN fail\n");
+ return PTR_ERR(request);
+ }
+ request->op = TO_MSEN;
+ tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+ rc = tape_do_io_interruptible(device, request);
+ __tape_34xx_medium_sense(request);
return rc;
}
+static void tape_34xx_medium_sense_async(struct tape_device *device)
+{
+ struct tape_request *request;
+
+ request = tape_alloc_request(1, 32);
+ if (IS_ERR(request)) {
+ DBF_EXCEPTION(6, "MSEN fail\n");
+ return;
+ }
+
+ request->op = TO_MSEN;
+ tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+ request->callback = (void *) __tape_34xx_medium_sense;
+ request->callback_data = NULL;
+ tape_do_io_async(device, request);
+}
+
struct tape_34xx_work {
struct tape_device *device;
enum tape_op op;
@@ -109,6 +129,9 @@ struct tape_34xx_work {
* is inserted but cannot call tape_do_io* from an interrupt context.
* Maybe that's useful for other actions we want to start from the
* interrupt handler.
+ * Note: the work handler is called by the system work queue. The tape
+ * commands started by the handler need to be asynchronous, otherwise
+ * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
*/
static void
tape_34xx_work_handler(struct work_struct *work)
@@ -119,7 +142,7 @@ tape_34xx_work_handler(struct work_struct *work)
switch(p->op) {
case TO_MSEN:
- tape_34xx_medium_sense(device);
+ tape_34xx_medium_sense_async(device);
break;
default:
DBF_EVENT(3, "T34XX: internal error: unknown work\n");
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index fbe361f..de2e99e 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -329,17 +329,17 @@ out:
/*
* Enable encryption
*/
-static int tape_3592_enable_crypt(struct tape_device *device)
+static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device)
{
struct tape_request *request;
char *data;
DBF_EVENT(6, "tape_3592_enable_crypt\n");
if (!crypt_supported(device))
- return -ENOSYS;
+ return ERR_PTR(-ENOSYS);
request = tape_alloc_request(2, 72);
if (IS_ERR(request))
- return PTR_ERR(request);
+ return request;
data = request->cpdata;
memset(data,0,72);
@@ -354,23 +354,42 @@ static int tape_3592_enable_crypt(struct tape_device *device)
request->op = TO_CRYPT_ON;
tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
+ return request;
+}
+
+static int tape_3592_enable_crypt(struct tape_device *device)
+{
+ struct tape_request *request;
+
+ request = __tape_3592_enable_crypt(device);
+ if (IS_ERR(request))
+ return PTR_ERR(request);
return tape_do_io_free(device, request);
}
+static void tape_3592_enable_crypt_async(struct tape_device *device)
+{
+ struct tape_request *request;
+
+ request = __tape_3592_enable_crypt(device);
+ if (!IS_ERR(request))
+ tape_do_io_async_free(device, request);
+}
+
/*
* Disable encryption
*/
-static int tape_3592_disable_crypt(struct tape_device *device)
+static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device)
{
struct tape_request *request;
char *data;
DBF_EVENT(6, "tape_3592_disable_crypt\n");
if (!crypt_supported(device))
- return -ENOSYS;
+ return ERR_PTR(-ENOSYS);
request = tape_alloc_request(2, 72);
if (IS_ERR(request))
- return PTR_ERR(request);
+ return request;
data = request->cpdata;
memset(data,0,72);
@@ -383,9 +402,28 @@ static int tape_3592_disable_crypt(struct tape_device *device)
tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
+ return request;
+}
+
+static int tape_3592_disable_crypt(struct tape_device *device)
+{
+ struct tape_request *request;
+
+ request = __tape_3592_disable_crypt(device);
+ if (IS_ERR(request))
+ return PTR_ERR(request);
return tape_do_io_free(device, request);
}
+static void tape_3592_disable_crypt_async(struct tape_device *device)
+{
+ struct tape_request *request;
+
+ request = __tape_3592_disable_crypt(device);
+ if (!IS_ERR(request))
+ tape_do_io_async_free(device, request);
+}
+
/*
* IOCTL: Set encryption status
*/
@@ -457,8 +495,7 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
/*
* SENSE Medium: Get Sense data about medium state
*/
-static int
-tape_3590_sense_medium(struct tape_device *device)
+static int tape_3590_sense_medium(struct tape_device *device)
{
struct tape_request *request;
@@ -470,6 +507,18 @@ tape_3590_sense_medium(struct tape_device *device)
return tape_do_io_free(device, request);
}
+static void tape_3590_sense_medium_async(struct tape_device *device)
+{
+ struct tape_request *request;
+
+ request = tape_alloc_request(1, 128);
+ if (IS_ERR(request))
+ return;
+ request->op = TO_MSEN;
+ tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
+ tape_do_io_async_free(device, request);
+}
+
/*
* MTTELL: Tell block. Return the number of block relative to current file.
*/
@@ -546,15 +595,14 @@ tape_3590_read_opposite(struct tape_device *device,
* 2. The attention msg is written to the "read subsystem data" buffer.
* In this case we probably should print it to the console.
*/
-static int
-tape_3590_read_attmsg(struct tape_device *device)
+static void tape_3590_read_attmsg_async(struct tape_device *device)
{
struct tape_request *request;
char *buf;
request = tape_alloc_request(3, 4096);
if (IS_ERR(request))
- return PTR_ERR(request);
+ return;
request->op = TO_READ_ATTMSG;
buf = request->cpdata;
buf[0] = PREP_RD_SS_DATA;
@@ -562,12 +610,15 @@ tape_3590_read_attmsg(struct tape_device *device)
tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf);
tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
- return tape_do_io_free(device, request);
+ tape_do_io_async_free(device, request);
}
/*
* These functions are used to schedule follow-up actions from within an
* interrupt context (like unsolicited interrupts).
+ * Note: the work handler is called by the system work queue. The tape
+ * commands started by the handler need to be asynchronous, otherwise
+ * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
*/
struct work_handler_data {
struct tape_device *device;
@@ -583,16 +634,16 @@ tape_3590_work_handler(struct work_struct *work)
switch (p->op) {
case TO_MSEN:
- tape_3590_sense_medium(p->device);
+ tape_3590_sense_medium_async(p->device);
break;
case TO_READ_ATTMSG:
- tape_3590_read_attmsg(p->device);
+ tape_3590_read_attmsg_async(p->device);
break;
case TO_CRYPT_ON:
- tape_3592_enable_crypt(p->device);
+ tape_3592_enable_crypt_async(p->device);
break;
case TO_CRYPT_OFF:
- tape_3592_disable_crypt(p->device);
+ tape_3592_disable_crypt_async(p->device);
break;
default:
DBF_EVENT(3, "T3590: work handler undefined for "
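The tape_3590 changes above follow one pattern throughout: a common builder that returns the prepared struct tape_request or an ERR_PTR() error, a synchronous wrapper that waits for completion, and an asynchronous wrapper that is the only form the work handler may call, since the handler runs on the system work queue. A minimal sketch of that shape, with hypothetical foo_* names standing in for the driver helpers:

/* Illustrative sketch only -- foo_* names are placeholders, not driver code. */
static struct foo_request *__foo_build_request(struct foo_device *dev)
{
        struct foo_request *req;

        if (!foo_feature_supported(dev))
                return ERR_PTR(-ENOSYS);        /* report errors as ERR_PTR() */
        req = foo_alloc_request(dev);
        if (IS_ERR(req))
                return req;                     /* propagate allocation failure */
        foo_fill_channel_program(req);          /* set up the CCWs */
        return req;
}

static int foo_do_sync(struct foo_device *dev)  /* process context, may sleep */
{
        struct foo_request *req = __foo_build_request(dev);

        if (IS_ERR(req))
                return PTR_ERR(req);
        return foo_do_io_free(dev, req);        /* waits for the I/O to finish */
}

static void foo_do_async(struct foo_device *dev) /* safe from the work handler */
{
        struct foo_request *req = __foo_build_request(dev);

        if (!IS_ERR(req))
                foo_do_io_async_free(dev, req); /* start the I/O, do not wait */
}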
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 44578b5..d3e58d7 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1561,6 +1561,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
{
struct Scsi_Host *host = rport_to_shost(rport);
fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+ unsigned long flags;
if (!fcport)
return;
@@ -1573,10 +1574,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
* Transport has effectively 'deleted' the rport, clear
* all local references.
*/
- spin_lock_irq(host->host_lock);
+ spin_lock_irqsave(host->host_lock, flags);
fcport->rport = fcport->drport = NULL;
*((fc_port_t **)rport->dd_data) = NULL;
- spin_unlock_irq(host->host_lock);
+ spin_unlock_irqrestore(host->host_lock, flags);
if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
return;
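The qla2xxx hunks above and in the two files that follow replace spin_lock_irq()/spin_unlock_irq() with the irqsave variants because these callbacks can run in contexts where interrupts are already disabled; unconditionally re-enabling them on unlock would corrupt the caller's interrupt state. The safe form, using nothing beyond the standard spinlock API:

/* Sketch: preserve whatever IRQ state the caller had. */
static void example_clear(struct example *e)
{
        unsigned long flags;

        spin_lock_irqsave(&e->lock, flags);      /* save state, then disable IRQs */
        e->ptr = NULL;                           /* touch data shared with IRQ context */
        spin_unlock_irqrestore(&e->lock, flags); /* restore the saved state */
}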
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f948e1a..d9479c3 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2505,11 +2505,12 @@ qla2x00_rport_del(void *data)
{
fc_port_t *fcport = data;
struct fc_rport *rport;
+ unsigned long flags;
- spin_lock_irq(fcport->vha->host->host_lock);
+ spin_lock_irqsave(fcport->vha->host->host_lock, flags);
rport = fcport->drport ? fcport->drport: fcport->rport;
fcport->drport = NULL;
- spin_unlock_irq(fcport->vha->host->host_lock);
+ spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
if (rport)
fc_remote_port_delete(rport);
}
@@ -2879,6 +2880,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
struct fc_rport_identifiers rport_ids;
struct fc_rport *rport;
struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
qla2x00_rport_del(fcport);
@@ -2893,9 +2895,9 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
"Unable to allocate fc remote port!\n");
return;
}
- spin_lock_irq(fcport->vha->host->host_lock);
+ spin_lock_irqsave(fcport->vha->host->host_lock, flags);
*((fc_port_t **)rport->dd_data) = fcport;
- spin_unlock_irq(fcport->vha->host->host_lock);
+ spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
rport->supported_classes = fcport->supported_classes;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c194c23..f27724d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -562,7 +562,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)
}
if (atomic_read(&fcport->state) != FCS_ONLINE) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
- atomic_read(&fcport->state) == FCS_DEVICE_LOST ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
@@ -2513,6 +2512,7 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
{
struct fc_rport *rport;
scsi_qla_host_t *base_vha;
+ unsigned long flags;
if (!fcport->rport)
return;
@@ -2520,9 +2520,9 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
rport = fcport->rport;
if (defer) {
base_vha = pci_get_drvdata(vha->hw->pdev);
- spin_lock_irq(vha->host->host_lock);
+ spin_lock_irqsave(vha->host->host_lock, flags);
fcport->drport = rport;
- spin_unlock_irq(vha->host->host_lock);
+ spin_unlock_irqrestore(vha->host->host_lock, flags);
set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
qla2xxx_wake_dpc(base_vha);
} else
@@ -3282,10 +3282,10 @@ qla2x00_do_dpc(void *data)
set_user_nice(current, -20);
+ set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
- set_current_state(TASK_INTERRUPTIBLE);
schedule();
__set_current_state(TASK_RUNNING);
@@ -3454,7 +3454,9 @@ qla2x00_do_dpc(void *data)
qla2x00_do_dpc_all_vps(base_vha);
ha->dpc_active = 0;
+ set_current_state(TASK_INTERRUPTIBLE);
} /* End of while(1) */
+ __set_current_state(TASK_RUNNING);
DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
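Moving set_current_state(TASK_INTERRUPTIBLE) ahead of the kthread_should_stop() check, and re-arming it at the bottom of the loop, closes the usual lost-wakeup window: the thread must already be marked as sleeping before it tests for pending work, so a wake-up that arrives between the test and schedule() still takes effect. The generic form of the loop, as a sketch rather than the qla2xxx code:

static int example_thread(void *data)
{
        set_current_state(TASK_INTERRUPTIBLE);  /* mark sleeping before testing */
        while (!kthread_should_stop()) {
                schedule();                     /* sleep until woken */
                __set_current_state(TASK_RUNNING);

                /* ... process whatever work is pending ... */

                set_current_state(TASK_INTERRUPTIBLE); /* re-arm for the next test */
        }
        __set_current_state(TASK_RUNNING);      /* leave the loop runnable */
        return 0;
}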
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 7b31093..a6b2d72 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1671,7 +1671,7 @@ static int do_device_access(struct scsi_cmnd *scmd,
unsigned long long lba, unsigned int num, int write)
{
int ret;
- unsigned int block, rest = 0;
+ unsigned long long block, rest = 0;
int (*func)(struct scsi_cmnd *, unsigned char *, int);
func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
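Widening block and rest to unsigned long long keeps the block-address arithmetic from truncating at 32 bits; with 512-byte sectors an unsigned int can only address 2 TiB, so larger simulated LBAs would silently wrap. A small illustration of the truncation this avoids:

unsigned long long lba = 0x123456789ULL;        /* a valid LBA on a >2 TiB device */
unsigned int block32 = lba;                     /* silently truncates to 0x23456789 */
unsigned long long block64 = lba;               /* keeps the full address */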
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9045c52..fb2bb35 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
&sdev->request_queue->queue_flags);
if (flagset)
queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
- __blk_run_queue(sdev->request_queue);
+ __blk_run_queue(sdev->request_queue, false);
if (flagset)
queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
spin_unlock(sdev->request_queue->queue_lock);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 998c01b..5c3ccfc 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
!test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
if (flagset)
queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
- __blk_run_queue(rport->rqst_q);
+ __blk_run_queue(rport->rqst_q, false);
if (flagset)
queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c
index 351d8a3..19752b0 100644
--- a/drivers/spi/pxa2xx_spi_pci.c
+++ b/drivers/spi/pxa2xx_spi_pci.c
@@ -7,10 +7,9 @@
#include <linux/of_device.h>
#include <linux/spi/pxa2xx_spi.h>
-struct awesome_struct {
+struct ce4100_info {
struct ssp_device ssp;
- struct platform_device spi_pdev;
- struct pxa2xx_spi_master spi_pdata;
+ struct platform_device *spi_pdev;
};
static DEFINE_MUTEX(ssp_lock);
@@ -51,23 +50,15 @@ void pxa_ssp_free(struct ssp_device *ssp)
}
EXPORT_SYMBOL_GPL(pxa_ssp_free);
-static void plat_dev_release(struct device *dev)
-{
- struct awesome_struct *as = container_of(dev,
- struct awesome_struct, spi_pdev.dev);
-
- of_device_node_put(&as->spi_pdev.dev);
-}
-
static int __devinit ce4100_spi_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
int ret;
resource_size_t phys_beg;
resource_size_t phys_len;
- struct awesome_struct *spi_info;
+ struct ce4100_info *spi_info;
struct platform_device *pdev;
- struct pxa2xx_spi_master *spi_pdata;
+ struct pxa2xx_spi_master spi_pdata;
struct ssp_device *ssp;
ret = pci_enable_device(dev);
@@ -84,33 +75,30 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev,
return ret;
}
+ pdev = platform_device_alloc("pxa2xx-spi", dev->devfn);
spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL);
- if (!spi_info) {
+ if (!pdev || !spi_info ) {
ret = -ENOMEM;
- goto err_kz;
+ goto err_nomem;
}
- ssp = &spi_info->ssp;
- pdev = &spi_info->spi_pdev;
- spi_pdata = &spi_info->spi_pdata;
+ memset(&spi_pdata, 0, sizeof(spi_pdata));
+ spi_pdata.num_chipselect = dev->devfn;
- pdev->name = "pxa2xx-spi";
- pdev->id = dev->devfn;
- pdev->dev.parent = &dev->dev;
- pdev->dev.platform_data = &spi_info->spi_pdata;
+ ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata));
+ if (ret)
+ goto err_nomem;
+ pdev->dev.parent = &dev->dev;
#ifdef CONFIG_OF
pdev->dev.of_node = dev->dev.of_node;
#endif
- pdev->dev.release = plat_dev_release;
-
- spi_pdata->num_chipselect = dev->devfn;
-
+ ssp = &spi_info->ssp;
ssp->phys_base = pci_resource_start(dev, 0);
ssp->mmio_base = ioremap(phys_beg, phys_len);
if (!ssp->mmio_base) {
dev_err(&pdev->dev, "failed to ioremap() registers\n");
ret = -EIO;
- goto err_remap;
+ goto err_nomem;
}
ssp->irq = dev->irq;
ssp->port_id = pdev->id;
@@ -122,7 +110,7 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev,
pci_set_drvdata(dev, spi_info);
- ret = platform_device_register(pdev);
+ ret = platform_device_add(pdev);
if (ret)
goto err_dev_add;
@@ -135,27 +123,21 @@ err_dev_add:
mutex_unlock(&ssp_lock);
iounmap(ssp->mmio_base);
-err_remap:
- kfree(spi_info);
-
-err_kz:
+err_nomem:
release_mem_region(phys_beg, phys_len);
-
+ platform_device_put(pdev);
+ kfree(spi_info);
return ret;
}
static void __devexit ce4100_spi_remove(struct pci_dev *dev)
{
- struct awesome_struct *spi_info;
- struct platform_device *pdev;
+ struct ce4100_info *spi_info;
struct ssp_device *ssp;
spi_info = pci_get_drvdata(dev);
-
ssp = &spi_info->ssp;
- pdev = &spi_info->spi_pdev;
-
- platform_device_unregister(pdev);
+ platform_device_unregister(spi_info->spi_pdev);
iounmap(ssp->mmio_base);
release_mem_region(pci_resource_start(dev, 0),
@@ -171,7 +153,6 @@ static void __devexit ce4100_spi_remove(struct pci_dev *dev)
}
static struct pci_device_id ce4100_spi_devices[] __devinitdata = {
-
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) },
{ },
};
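Instead of embedding a struct platform_device and its platform data inside the driver's private structure (which required a custom ->release() hook), the rewrite goes through the platform core's helpers, which own the device's lifetime and copy the platform data. A condensed sketch of that registration sequence, with a hypothetical device name and pdata type:

static int example_register(struct my_pdata *pdata)
{
        struct platform_device *pdev;
        int ret;

        pdev = platform_device_alloc("example-dev", 0); /* refcounted allocation */
        if (!pdev)
                return -ENOMEM;

        ret = platform_device_add_data(pdev, pdata, sizeof(*pdata)); /* copied by the core */
        if (!ret)
                ret = platform_device_add(pdev);        /* register with the core */
        if (ret)
                platform_device_put(pdev);              /* drop the reference on failure */
        return ret;
}

Teardown then reduces to a single platform_device_unregister() on the stored pointer, which is what the new ce4100_spi_remove() does.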
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 5cfd708..973bb19 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -13,8 +13,7 @@ target_core_mod-y := target_core_configfs.o \
target_core_transport.o \
target_core_cdb.o \
target_core_ua.o \
- target_core_rd.o \
- target_core_mib.o
+ target_core_rd.o
obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 2764510..caf8dc1 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -37,7 +37,6 @@
#include <linux/parser.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
-#include <linux/proc_fs.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -1971,13 +1970,35 @@ static void target_core_dev_release(struct config_item *item)
{
struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
struct se_subsystem_dev, se_dev_group);
- struct config_group *dev_cg;
-
- if (!(se_dev))
- return;
+ struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+ struct se_subsystem_api *t = hba->transport;
+ struct config_group *dev_cg = &se_dev->se_dev_group;
- dev_cg = &se_dev->se_dev_group;
kfree(dev_cg->default_groups);
+ /*
+ * This pointer will be set when the storage is enabled with:
+ * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
+ */
+ if (se_dev->se_dev_ptr) {
+ printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
+ "virtual_device() for se_dev_ptr: %p\n",
+ se_dev->se_dev_ptr);
+
+ se_free_virtual_device(se_dev->se_dev_ptr, hba);
+ } else {
+ /*
+ * Release struct se_subsystem_dev->se_dev_su_ptr..
+ */
+ printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
+ "device() for se_dev_su_ptr: %p\n",
+ se_dev->se_dev_su_ptr);
+
+ t->free_device(se_dev->se_dev_su_ptr);
+ }
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
+ "_dev_t: %p\n", se_dev);
+ kfree(se_dev);
}
static ssize_t target_core_dev_show(struct config_item *item,
@@ -2140,7 +2161,16 @@ static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
NULL,
};
+static void target_core_alua_lu_gp_release(struct config_item *item)
+{
+ struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
+ struct t10_alua_lu_gp, lu_gp_group);
+
+ core_alua_free_lu_gp(lu_gp);
+}
+
static struct configfs_item_operations target_core_alua_lu_gp_ops = {
+ .release = target_core_alua_lu_gp_release,
.show_attribute = target_core_alua_lu_gp_attr_show,
.store_attribute = target_core_alua_lu_gp_attr_store,
};
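The theme of this file's changes is moving the final free out of the configfs drop_item() callback and into a configfs_item_operations->release() handler, so the object is destroyed only once the last config_item reference is dropped rather than racing with readers that still hold one. The shape of the pattern, sketched with a made-up object type:

static void my_obj_release(struct config_item *item)
{
        struct my_obj *obj = container_of(to_config_group(item),
                                          struct my_obj, group);

        kfree(obj);                     /* safe: no config_item references remain */
}

static struct configfs_item_operations my_obj_item_ops = {
        .release        = my_obj_release,
};

static void my_drop_obj(struct config_group *group, struct config_item *item)
{
        config_item_put(item);          /* ->release() runs when the count hits zero */
}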
@@ -2191,9 +2221,11 @@ static void target_core_alua_drop_lu_gp(
printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
" Group: core/alua/lu_gps/%s, ID: %hu\n",
config_item_name(item), lu_gp->lu_gp_id);
-
+ /*
+ * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
+ * -> target_core_alua_lu_gp_release()
+ */
config_item_put(item);
- core_alua_free_lu_gp(lu_gp);
}
static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
@@ -2549,7 +2581,16 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
NULL,
};
+static void target_core_alua_tg_pt_gp_release(struct config_item *item)
+{
+ struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
+ struct t10_alua_tg_pt_gp, tg_pt_gp_group);
+
+ core_alua_free_tg_pt_gp(tg_pt_gp);
+}
+
static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
+ .release = target_core_alua_tg_pt_gp_release,
.show_attribute = target_core_alua_tg_pt_gp_attr_show,
.store_attribute = target_core_alua_tg_pt_gp_attr_store,
};
@@ -2602,9 +2643,11 @@ static void target_core_alua_drop_tg_pt_gp(
printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
" Group: alua/tg_pt_gps/%s, ID: %hu\n",
config_item_name(item), tg_pt_gp->tg_pt_gp_id);
-
+ /*
+ * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
+ * -> target_core_alua_tg_pt_gp_release().
+ */
config_item_put(item);
- core_alua_free_tg_pt_gp(tg_pt_gp);
}
static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
@@ -2771,13 +2814,11 @@ static void target_core_drop_subdev(
struct se_subsystem_api *t;
struct config_item *df_item;
struct config_group *dev_cg, *tg_pt_gp_cg;
- int i, ret;
+ int i;
hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
- if (mutex_lock_interruptible(&hba->hba_access_mutex))
- goto out;
-
+ mutex_lock(&hba->hba_access_mutex);
t = hba->transport;
spin_lock(&se_global->g_device_lock);
@@ -2791,7 +2832,10 @@ static void target_core_drop_subdev(
config_item_put(df_item);
}
kfree(tg_pt_gp_cg->default_groups);
- core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
+ /*
+ * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
+ * directly from target_core_alua_tg_pt_gp_release().
+ */
T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
dev_cg = &se_dev->se_dev_group;
@@ -2800,38 +2844,12 @@ static void target_core_drop_subdev(
dev_cg->default_groups[i] = NULL;
config_item_put(df_item);
}
-
- config_item_put(item);
/*
- * This pointer will set when the storage is enabled with:
- * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
+ * The releasing of se_dev and associated se_dev->se_dev_ptr is done
+ * from target_core_dev_item_ops->release() ->target_core_dev_release().
*/
- if (se_dev->se_dev_ptr) {
- printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
- "virtual_device() for se_dev_ptr: %p\n",
- se_dev->se_dev_ptr);
-
- ret = se_free_virtual_device(se_dev->se_dev_ptr, hba);
- if (ret < 0)
- goto hba_out;
- } else {
- /*
- * Release struct se_subsystem_dev->se_dev_su_ptr..
- */
- printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
- "device() for se_dev_su_ptr: %p\n",
- se_dev->se_dev_su_ptr);
-
- t->free_device(se_dev->se_dev_su_ptr);
- }
-
- printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
- "_dev_t: %p\n", se_dev);
-
-hba_out:
+ config_item_put(item);
mutex_unlock(&hba->hba_access_mutex);
-out:
- kfree(se_dev);
}
static struct configfs_group_operations target_core_hba_group_ops = {
@@ -2914,6 +2932,13 @@ SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
+static void target_core_hba_release(struct config_item *item)
+{
+ struct se_hba *hba = container_of(to_config_group(item),
+ struct se_hba, hba_group);
+ core_delete_hba(hba);
+}
+
static struct configfs_attribute *target_core_hba_attrs[] = {
&target_core_hba_hba_info.attr,
&target_core_hba_hba_mode.attr,
@@ -2921,6 +2946,7 @@ static struct configfs_attribute *target_core_hba_attrs[] = {
};
static struct configfs_item_operations target_core_hba_item_ops = {
+ .release = target_core_hba_release,
.show_attribute = target_core_hba_attr_show,
.store_attribute = target_core_hba_attr_store,
};
@@ -2997,10 +3023,11 @@ static void target_core_call_delhbafromtarget(
struct config_group *group,
struct config_item *item)
{
- struct se_hba *hba = item_to_hba(item);
-
+ /*
+ * core_delete_hba() is called from target_core_hba_item_ops->release()
+ * -> target_core_hba_release()
+ */
config_item_put(item);
- core_delete_hba(hba);
}
static struct configfs_group_operations target_core_group_ops = {
@@ -3022,7 +3049,6 @@ static int target_core_init_configfs(void)
struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
struct config_group *lu_gp_cg = NULL;
struct configfs_subsystem *subsys;
- struct proc_dir_entry *scsi_target_proc = NULL;
struct t10_alua_lu_gp *lu_gp;
int ret;
@@ -3128,21 +3154,10 @@ static int target_core_init_configfs(void)
if (core_dev_setup_virtual_lun0() < 0)
goto out;
- scsi_target_proc = proc_mkdir("scsi_target", 0);
- if (!(scsi_target_proc)) {
- printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n");
- goto out;
- }
- ret = init_scsi_target_mib();
- if (ret < 0)
- goto out;
-
return 0;
out:
configfs_unregister_subsystem(subsys);
- if (scsi_target_proc)
- remove_proc_entry("scsi_target", 0);
core_dev_release_virtual_lun0();
rd_module_exit();
out_global:
@@ -3178,8 +3193,7 @@ static void target_core_exit_configfs(void)
config_item_put(item);
}
kfree(lu_gp_cg->default_groups);
- core_alua_free_lu_gp(se_global->default_lu_gp);
- se_global->default_lu_gp = NULL;
+ lu_gp_cg->default_groups = NULL;
alua_cg = &se_global->alua_group;
for (i = 0; alua_cg->default_groups[i]; i++) {
@@ -3188,6 +3202,7 @@ static void target_core_exit_configfs(void)
config_item_put(item);
}
kfree(alua_cg->default_groups);
+ alua_cg->default_groups = NULL;
hba_cg = &se_global->target_core_hbagroup;
for (i = 0; hba_cg->default_groups[i]; i++) {
@@ -3196,20 +3211,20 @@ static void target_core_exit_configfs(void)
config_item_put(item);
}
kfree(hba_cg->default_groups);
-
- for (i = 0; subsys->su_group.default_groups[i]; i++) {
- item = &subsys->su_group.default_groups[i]->cg_item;
- subsys->su_group.default_groups[i] = NULL;
- config_item_put(item);
- }
+ hba_cg->default_groups = NULL;
+ /*
+ * We expect subsys->su_group.default_groups to be released
+ * by configfs subsystem provider logic..
+ */
+ configfs_unregister_subsystem(subsys);
kfree(subsys->su_group.default_groups);
- configfs_unregister_subsystem(subsys);
+ core_alua_free_lu_gp(se_global->default_lu_gp);
+ se_global->default_lu_gp = NULL;
+
printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
" Infrastructure\n");
- remove_scsi_target_mib();
- remove_proc_entry("scsi_target", 0);
core_dev_release_virtual_lun0();
rd_module_exit();
release_se_global();
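The reworked exit path relies on those same release callbacks: child items are put first, configfs_unregister_subsystem() then drops the remaining references (running the ->release() handlers), and only afterwards are the backing default_groups arrays and the default LU group freed. A minimal sketch of that ordering, assuming a subsystem laid out like the one above:

static void example_exit(void)
{
        /* Drop configfs references first so the ->release() handlers run ... */
        configfs_unregister_subsystem(&example_subsys);
        /* ... then free the array backing the subsystem's default groups. */
        kfree(example_subsys.su_group.default_groups);
}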
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 317ce58..5da051a 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -373,11 +373,11 @@ int core_update_device_list_for_node(
/*
* deve->se_lun_acl will be NULL for demo-mode created LUNs
* that have not been explictly concerted to MappedLUNs ->
- * struct se_lun_acl.
+ * struct se_lun_acl, but we remove deve->alua_port_list from
+ * port->sep_alua_list. This also means that active UAs and
+ * NodeACL context specific PR metadata for demo-mode
+ * MappedLUN *deve will be released below..
*/
- if (!(deve->se_lun_acl))
- return 0;
-
spin_lock_bh(&port->sep_alua_lock);
list_del(&deve->alua_port_list);
spin_unlock_bh(&port->sep_alua_lock);
@@ -395,12 +395,14 @@ int core_update_device_list_for_node(
printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
" already set for demo mode -> explict"
" LUN ACL transition\n");
+ spin_unlock_irq(&nacl->device_list_lock);
return -1;
}
if (deve->se_lun != lun) {
printk(KERN_ERR "struct se_dev_entry->se_lun does"
" match passed struct se_lun for demo mode"
" -> explict LUN ACL transition\n");
+ spin_unlock_irq(&nacl->device_list_lock);
return -1;
}
deve->se_lun_acl = lun_acl;
@@ -865,9 +867,6 @@ static void se_dev_stop(struct se_device *dev)
}
}
spin_unlock(&hba->device_lock);
-
- while (atomic_read(&hba->dev_mib_access_count))
- cpu_relax();
}
int se_dev_check_online(struct se_device *dev)
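The two added spin_unlock_irq() calls above close error paths that returned with nacl->device_list_lock still held. The general rule is that every early return inside a locked section needs its own unlock (or a shared exit label), as in this sketch:

static int example_update(struct example *e)
{
        spin_lock_irq(&e->lock);
        if (e->in_bad_state) {
                spin_unlock_irq(&e->lock);      /* release before the early return */
                return -EINVAL;
        }
        e->count++;
        spin_unlock_irq(&e->lock);
        return 0;
}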
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 32b148d..b65d1c8 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -214,12 +214,22 @@ TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);
CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
+static void target_fabric_mappedlun_release(struct config_item *item)
+{
+ struct se_lun_acl *lacl = container_of(to_config_group(item),
+ struct se_lun_acl, se_lun_group);
+ struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
+
+ core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
&target_fabric_mappedlun_write_protect.attr,
NULL,
};
static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
+ .release = target_fabric_mappedlun_release,
.show_attribute = target_fabric_mappedlun_attr_show,
.store_attribute = target_fabric_mappedlun_attr_store,
.allow_link = target_fabric_mappedlun_link,
@@ -337,15 +347,21 @@ static void target_fabric_drop_mappedlun(
struct config_group *group,
struct config_item *item)
{
- struct se_lun_acl *lacl = container_of(to_config_group(item),
- struct se_lun_acl, se_lun_group);
- struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
-
config_item_put(item);
- core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
+static void target_fabric_nacl_base_release(struct config_item *item)
+{
+ struct se_node_acl *se_nacl = container_of(to_config_group(item),
+ struct se_node_acl, acl_group);
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+ tf->tf_ops.fabric_drop_nodeacl(se_nacl);
}
static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
+ .release = target_fabric_nacl_base_release,
.show_attribute = target_fabric_nacl_base_attr_show,
.store_attribute = target_fabric_nacl_base_attr_store,
};
@@ -404,9 +420,6 @@ static void target_fabric_drop_nodeacl(
struct config_group *group,
struct config_item *item)
{
- struct se_portal_group *se_tpg = container_of(group,
- struct se_portal_group, tpg_acl_group);
- struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_node_acl *se_nacl = container_of(to_config_group(item),
struct se_node_acl, acl_group);
struct config_item *df_item;
@@ -419,9 +432,10 @@ static void target_fabric_drop_nodeacl(
nacl_cg->default_groups[i] = NULL;
config_item_put(df_item);
}
-
+ /*
+ * struct se_node_acl free is done in target_fabric_nacl_base_release()
+ */
config_item_put(item);
- tf->tf_ops.fabric_drop_nodeacl(se_nacl);
}
static struct configfs_group_operations target_fabric_nacl_group_ops = {
@@ -437,7 +451,18 @@ TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);
+static void target_fabric_np_base_release(struct config_item *item)
+{
+ struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
+ struct se_tpg_np, tpg_np_group);
+ struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent;
+ struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+ tf->tf_ops.fabric_drop_np(se_tpg_np);
+}
+
static struct configfs_item_operations target_fabric_np_base_item_ops = {
+ .release = target_fabric_np_base_release,
.show_attribute = target_fabric_np_base_attr_show,
.store_attribute = target_fabric_np_base_attr_store,
};
@@ -466,6 +491,7 @@ static struct config_group *target_fabric_make_np(
if (!(se_tpg_np) || IS_ERR(se_tpg_np))
return ERR_PTR(-EINVAL);
+ se_tpg_np->tpg_np_parent = se_tpg;
config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
&TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
@@ -476,14 +502,10 @@ static void target_fabric_drop_np(
struct config_group *group,
struct config_item *item)
{
- struct se_portal_group *se_tpg = container_of(group,
- struct se_portal_group, tpg_np_group);
- struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
- struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
- struct se_tpg_np, tpg_np_group);
-
+ /*
+ * struct se_tpg_np is released via target_fabric_np_base_release()
+ */
config_item_put(item);
- tf->tf_ops.fabric_drop_np(se_tpg_np);
}
static struct configfs_group_operations target_fabric_np_group_ops = {
@@ -814,7 +836,18 @@ TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
*/
CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);
+static void target_fabric_tpg_release(struct config_item *item)
+{
+ struct se_portal_group *se_tpg = container_of(to_config_group(item),
+ struct se_portal_group, tpg_group);
+ struct se_wwn *wwn = se_tpg->se_tpg_wwn;
+ struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+ tf->tf_ops.fabric_drop_tpg(se_tpg);
+}
+
static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
+ .release = target_fabric_tpg_release,
.show_attribute = target_fabric_tpg_attr_show,
.store_attribute = target_fabric_tpg_attr_store,
};
@@ -872,8 +905,6 @@ static void target_fabric_drop_tpg(
struct config_group *group,
struct config_item *item)
{
- struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
- struct target_fabric_configfs *tf = wwn->wwn_tf;
struct se_portal_group *se_tpg = container_of(to_config_group(item),
struct se_portal_group, tpg_group);
struct config_group *tpg_cg = &se_tpg->tpg_group;
@@ -890,15 +921,28 @@ static void target_fabric_drop_tpg(
}
config_item_put(item);
- tf->tf_ops.fabric_drop_tpg(se_tpg);
}
+static void target_fabric_release_wwn(struct config_item *item)
+{
+ struct se_wwn *wwn = container_of(to_config_group(item),
+ struct se_wwn, wwn_group);
+ struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+ tf->tf_ops.fabric_drop_wwn(wwn);
+}
+
+static struct configfs_item_operations target_fabric_tpg_item_ops = {
+ .release = target_fabric_release_wwn,
+};
+
static struct configfs_group_operations target_fabric_tpg_group_ops = {
.make_group = target_fabric_make_tpg,
.drop_item = target_fabric_drop_tpg,
};
-TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL);
+TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops,
+ NULL);
/* End of tfc_tpg_cit */
@@ -932,13 +976,7 @@ static void target_fabric_drop_wwn(
struct config_group *group,
struct config_item *item)
{
- struct target_fabric_configfs *tf = container_of(group,
- struct target_fabric_configfs, tf_group);
- struct se_wwn *wwn = container_of(to_config_group(item),
- struct se_wwn, wwn_group);
-
config_item_put(item);
- tf->tf_ops.fabric_drop_wwn(wwn);
}
static struct configfs_group_operations target_fabric_wwn_group_ops = {
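Because a configfs ->release() handler only receives the config_item, any context that drop_item() used to derive from its parent-group argument must now be reachable from the object itself, which is why target_fabric_make_np() starts recording se_tpg_np->tpg_np_parent. Sketched generically, with hypothetical parent/child types:

struct child {
        struct config_group group;
        struct parent *parent;          /* back-pointer set at creation time */
};

static void child_release(struct config_item *item)
{
        struct child *c = container_of(to_config_group(item),
                                       struct child, group);

        c->parent->ops->free_child(c);  /* no group argument needed here */
}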
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c6e0d75..67f0c09 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -154,7 +154,7 @@ static struct se_device *iblock_create_virtdevice(
bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
- if (!(bd))
+ if (IS_ERR(bd))
goto failed;
/*
* Setup the local scope queue_limits from struct request_queue->limits
@@ -220,8 +220,10 @@ static void iblock_free_device(void *p)
{
struct iblock_dev *ib_dev = p;
- blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
- bioset_free(ib_dev->ibd_bio_set);
+ if (ib_dev->ibd_bd != NULL)
+ blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+ if (ib_dev->ibd_bio_set != NULL)
+ bioset_free(ib_dev->ibd_bio_set);
kfree(ib_dev);
}
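blkdev_get_by_path() reports failure as an ERR_PTR()-encoded pointer, never as NULL, so the previous !(bd) test could let an error value be dereferenced later; the same fix appears in target_core_pscsi.c further down. The corrected calling convention, sketched:

static int example_claim_bdev(const char *path, void *holder,
                              struct block_device **out)
{
        struct block_device *bd;

        bd = blkdev_get_by_path(path, FMODE_WRITE | FMODE_READ | FMODE_EXCL,
                                holder);
        if (IS_ERR(bd))
                return PTR_ERR(bd);     /* e.g. -ENOENT or -ENOTBLK, not NULL */
        *out = bd;
        return 0;
}

The NULL checks added to iblock_free_device() follow from the same error handling: the device may be torn down before ibd_bd or ibd_bio_set were ever set up.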
diff --git a/drivers/target/target_core_mib.c b/drivers/target/target_core_mib.c
deleted file mode 100644
index d5a48aa..0000000
--- a/drivers/target/target_core_mib.c
+++ /dev/null
@@ -1,1078 +0,0 @@
-/*******************************************************************************
- * Filename: target_core_mib.c
- *
- * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
- *
- * Nicholas A. Bellinger <nab@linux-iscsi.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- ******************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/version.h>
-#include <generated/utsrelease.h>
-#include <linux/utsname.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/blkdev.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-
-#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_configfs.h>
-
-#include "target_core_hba.h"
-#include "target_core_mib.h"
-
-/* SCSI mib table index */
-static struct scsi_index_table scsi_index_table;
-
-#ifndef INITIAL_JIFFIES
-#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
-#endif
-
-/* SCSI Instance Table */
-#define SCSI_INST_SW_INDEX 1
-#define SCSI_TRANSPORT_INDEX 1
-
-#define NONE "None"
-#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
-
-static inline int list_is_first(const struct list_head *list,
- const struct list_head *head)
-{
- return list->prev == head;
-}
-
-static void *locate_hba_start(
- struct seq_file *seq,
- loff_t *pos)
-{
- spin_lock(&se_global->g_device_lock);
- return seq_list_start(&se_global->g_se_dev_list, *pos);
-}
-
-static void *locate_hba_next(
- struct seq_file *seq,
- void *v,
- loff_t *pos)
-{
- return seq_list_next(v, &se_global->g_se_dev_list, pos);
-}
-
-static void locate_hba_stop(struct seq_file *seq, void *v)
-{
- spin_unlock(&se_global->g_device_lock);
-}
-
-/****************************************************************************
- * SCSI MIB Tables
- ****************************************************************************/
-
-/*
- * SCSI Instance Table
- */
-static void *scsi_inst_seq_start(
- struct seq_file *seq,
- loff_t *pos)
-{
- spin_lock(&se_global->hba_lock);
- return seq_list_start(&se_global->g_hba_list, *pos);
-}
-
-static void *scsi_inst_seq_next(
- struct seq_file *seq,
- void *v,
- loff_t *pos)
-{
- return seq_list_next(v, &se_global->g_hba_list, pos);
-}
-
-static void scsi_inst_seq_stop(struct seq_file *seq, void *v)
-{
- spin_unlock(&se_global->hba_lock);
-}
-
-static int scsi_inst_seq_show(struct seq_file *seq, void *v)
-{
- struct se_hba *hba = list_entry(v, struct se_hba, hba_list);
-
- if (list_is_first(&hba->hba_list, &se_global->g_hba_list))
- seq_puts(seq, "inst sw_indx\n");
-
- seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX);
- seq_printf(seq, "plugin: %s version: %s\n",
- hba->transport->name, TARGET_CORE_VERSION);
-
- return 0;
-}
-
-static const struct seq_operations scsi_inst_seq_ops = {
- .start = scsi_inst_seq_start,
- .next = scsi_inst_seq_next,
- .stop = scsi_inst_seq_stop,
- .show = scsi_inst_seq_show
-};
-
-static int scsi_inst_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &scsi_inst_seq_ops);
-}
-
-static const struct file_operations scsi_inst_seq_fops = {
- .owner = THIS_MODULE,
- .open = scsi_inst_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-/*
- * SCSI Device Table
- */
-static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos)
-{
- return locate_hba_start(seq, pos);
-}
-
-static void *scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_dev_seq_stop(struct seq_file *seq, void *v)
-{
- locate_hba_stop(seq, v);
-}
-
-static int scsi_dev_seq_show(struct seq_file *seq, void *v)
-{
- struct se_hba *hba;
- struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
- g_se_dev_list);
- struct se_device *dev = se_dev->se_dev_ptr;
- char str[28];
- int k;
-
- if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
- seq_puts(seq, "inst indx role ports\n");
-
- if (!(dev))
- return 0;
-
- hba = dev->se_hba;
- if (!(hba)) {
- /* Log error ? */
- return 0;
- }
-
- seq_printf(seq, "%u %u %s %u\n", hba->hba_index,
- dev->dev_index, "Target", dev->dev_port_count);
-
- memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
-
- /* vendor */
- for (k = 0; k < 8; k++)
- str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ?
- DEV_T10_WWN(dev)->vendor[k] : 0x20;
- str[k] = 0x20;
-
- /* model */
- for (k = 0; k < 16; k++)
- str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ?
- DEV_T10_WWN(dev)->model[k] : 0x20;
- str[k + 9] = 0;
-
- seq_printf(seq, "dev_alias: %s\n", str);
-
- return 0;
-}
-
-static const struct seq_operations scsi_dev_seq_ops = {
- .start = scsi_dev_seq_start,
- .next = scsi_dev_seq_next,
- .stop = scsi_dev_seq_stop,
- .show = scsi_dev_seq_show
-};
-
-static int scsi_dev_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &scsi_dev_seq_ops);
-}
-
-static const struct file_operations scsi_dev_seq_fops = {
- .owner = THIS_MODULE,
- .open = scsi_dev_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-/*
- * SCSI Port Table
- */
-static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos)
-{
- return locate_hba_start(seq, pos);
-}
-
-static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_port_seq_stop(struct seq_file *seq, void *v)
-{
- locate_hba_stop(seq, v);
-}
-
-static int scsi_port_seq_show(struct seq_file *seq, void *v)
-{
- struct se_hba *hba;
- struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
- g_se_dev_list);
- struct se_device *dev = se_dev->se_dev_ptr;
- struct se_port *sep, *sep_tmp;
-
- if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
- seq_puts(seq, "inst device indx role busy_count\n");
-
- if (!(dev))
- return 0;
-
- hba = dev->se_hba;
- if (!(hba)) {
- /* Log error ? */
- return 0;
- }
-
- /* FIXME: scsiPortBusyStatuses count */
- spin_lock(&dev->se_port_lock);
- list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
- seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index,
- dev->dev_index, sep->sep_index, "Device",
- dev->dev_index, 0);
- }
- spin_unlock(&dev->se_port_lock);
-
- return 0;
-}
-
-static const struct seq_operations scsi_port_seq_ops = {
- .start = scsi_port_seq_start,
- .next = scsi_port_seq_next,
- .stop = scsi_port_seq_stop,
- .show = scsi_port_seq_show
-};
-
-static int scsi_port_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &scsi_port_seq_ops);
-}
-
-static const struct file_operations scsi_port_seq_fops = {
- .owner = THIS_MODULE,
- .open = scsi_port_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-/*
- * SCSI Transport Table
- */
-static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos)
-{
- return locate_hba_start(seq, pos);
-}
-
-static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_transport_seq_stop(struct seq_file *seq, void *v)
-{
- locate_hba_stop(seq, v);
-}
-
-static int scsi_transport_seq_show(struct seq_file *seq, void *v)
-{
- struct se_hba *hba;
- struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
- g_se_dev_list);
- struct se_device *dev = se_dev->se_dev_ptr;
- struct se_port *se, *se_tmp;
- struct se_portal_group *tpg;
- struct t10_wwn *wwn;
- char buf[64];
-
- if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
- seq_puts(seq, "inst device indx dev_name\n");
-
- if (!(dev))
- return 0;
-
- hba = dev->se_hba;
- if (!(hba)) {
- /* Log error ? */
- return 0;
- }
-
- wwn = DEV_T10_WWN(dev);
-
- spin_lock(&dev->se_port_lock);
- list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) {
- tpg = se->sep_tpg;
- sprintf(buf, "scsiTransport%s",
- TPG_TFO(tpg)->get_fabric_name());
-
- seq_printf(seq, "%u %s %u %s+%s\n",
- hba->hba_index, /* scsiTransportIndex */
- buf, /* scsiTransportType */
- (TPG_TFO(tpg)->tpg_get_inst_index != NULL) ?
- TPG_TFO(tpg)->tpg_get_inst_index(tpg) :
- 0,
- TPG_TFO(tpg)->tpg_get_wwn(tpg),
- (strlen(wwn->unit_serial)) ?
- /* scsiTransportDevName */
- wwn->unit_serial : wwn->vendor);
- }
- spin_unlock(&dev->se_port_lock);
-
- return 0;
-}
-
-static const struct seq_operations scsi_transport_seq_ops = {
- .start = scsi_transport_seq_start,
- .next = scsi_transport_seq_next,
- .stop = scsi_transport_seq_stop,
- .show = scsi_transport_seq_show
-};
-
-static int scsi_transport_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &scsi_transport_seq_ops);
-}
-
-static const struct file_operations scsi_transport_seq_fops = {
- .owner = THIS_MODULE,
- .open = scsi_transport_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-/*
- * SCSI Target Device Table
- */
-static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos)
-{
- return locate_hba_start(seq, pos);
-}
-
-static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v)
-{
- locate_hba_stop(seq, v);
-}
-
-
-#define LU_COUNT 1 /* for now */
-static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v)
-{
- struct se_hba *hba;
- struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
- g_se_dev_list);
- struct se_device *dev = se_dev->se_dev_ptr;
- int non_accessible_lus = 0;
- char status[16];
-
- if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
- seq_puts(seq, "inst indx num_LUs status non_access_LUs"
- " resets\n");
-
- if (!(dev))
- return 0;
-
- hba = dev->se_hba;
- if (!(hba)) {
- /* Log error ? */
- return 0;
- }
-
- switch (dev->dev_status) {
- case TRANSPORT_DEVICE_ACTIVATED:
- strcpy(status, "activated");
- break;
- case TRANSPORT_DEVICE_DEACTIVATED:
- strcpy(status, "deactivated");
- non_accessible_lus = 1;
- break;
- case TRANSPORT_DEVICE_SHUTDOWN:
- strcpy(status, "shutdown");
- non_accessible_lus = 1;
- break;
- case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
- case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
- strcpy(status, "offline");
- non_accessible_lus = 1;
- break;
- default:
- sprintf(status, "unknown(%d)", dev->dev_status);
- non_accessible_lus = 1;
- }
-
- seq_printf(seq, "%u %u %u %s %u %u\n",
- hba->hba_index, dev->dev_index, LU_COUNT,
- status, non_accessible_lus, dev->num_resets);
-
- return 0;
-}
-
-static const struct seq_operations scsi_tgt_dev_seq_ops = {
- .start = scsi_tgt_dev_seq_start,
- .next = scsi_tgt_dev_seq_next,
- .stop = scsi_tgt_dev_seq_stop,
- .show = scsi_tgt_dev_seq_show
-};
-
-static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &scsi_tgt_dev_seq_ops);
-}
-
-static const struct file_operations scsi_tgt_dev_seq_fops = {
- .owner = THIS_MODULE,
- .open = scsi_tgt_dev_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-/*
- * SCSI Target Port Table
- */
-static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos)
-{
- return locate_hba_start(seq, pos);
-}
-
-static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v)
-{
- locate_hba_stop(seq, v);
-}
-
-static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v)
-{
- struct se_hba *hba;
- struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
- g_se_dev_list);
- struct se_device *dev = se_dev->se_dev_ptr;
- struct se_port *sep, *sep_tmp;
- struct se_portal_group *tpg;
- u32 rx_mbytes, tx_mbytes;
- unsigned long long num_cmds;
- char buf[64];
-
- if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
- seq_puts(seq, "inst device indx name port_index in_cmds"
- " write_mbytes read_mbytes hs_in_cmds\n");
-
- if (!(dev))
- return 0;
-
- hba = dev->se_hba;
- if (!(hba)) {
- /* Log error ? */
- return 0;
- }
-
- spin_lock(&dev->se_port_lock);
- list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
- tpg = sep->sep_tpg;
- sprintf(buf, "%sPort#",
- TPG_TFO(tpg)->get_fabric_name());
-
- seq_printf(seq, "%u %u %u %s%d %s%s%d ",
- hba->hba_index,
- dev->dev_index,
- sep->sep_index,
- buf, sep->sep_index,
- TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
- TPG_TFO(tpg)->tpg_get_tag(tpg));
-
- spin_lock(&sep->sep_lun->lun_sep_lock);
- num_cmds = sep->sep_stats.cmd_pdus;
- rx_mbytes = (sep->sep_stats.rx_data_octets >> 20);
- tx_mbytes = (sep->sep_stats.tx_data_octets >> 20);
- spin_unlock(&sep->sep_lun->lun_sep_lock);
-
- seq_printf(seq, "%llu %u %u %u\n", num_cmds,
- rx_mbytes, tx_mbytes, 0);
- }
- spin_unlock(&dev->se_port_lock);
-
- return 0;
-}
-
-static const struct seq_operations scsi_tgt_port_seq_ops = {
- .start = scsi_tgt_port_seq_start,
- .next = scsi_tgt_port_seq_next,
- .stop = scsi_tgt_port_seq_stop,
- .show = scsi_tgt_port_seq_show
-};
-
-static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &scsi_tgt_port_seq_ops);
-}
-
-static const struct file_operations scsi_tgt_port_seq_fops = {
- .owner = THIS_MODULE,
- .open = scsi_tgt_port_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-/*
- * SCSI Authorized Initiator Table:
- * It contains the SCSI Initiators authorized to be attached to one of the
- * local Target ports.
- * Iterates through all active TPGs and extracts the info from the ACLs
- */
-static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos)
-{
- spin_lock_bh(&se_global->se_tpg_lock);
- return seq_list_start(&se_global->g_se_tpg_list, *pos);
-}
-
-static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v,
- loff_t *pos)
-{
- return seq_list_next(v, &se_global->g_se_tpg_list, pos);
-}
-
-static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v)
-{
- spin_unlock_bh(&se_global->se_tpg_lock);
-}
-
-static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v)
-{
- struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
- se_tpg_list);
- struct se_dev_entry *deve;
- struct se_lun *lun;
- struct se_node_acl *se_nacl;
- int j;
-
- if (list_is_first(&se_tpg->se_tpg_list,
- &se_global->g_se_tpg_list))
- seq_puts(seq, "inst dev port indx dev_or_port intr_name "
- "map_indx att_count num_cmds read_mbytes "
- "write_mbytes hs_num_cmds creation_time row_status\n");
-
- if (!(se_tpg))
- return 0;
-
- spin_lock(&se_tpg->acl_node_lock);
- list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) {
-
- atomic_inc(&se_nacl->mib_ref_count);
- smp_mb__after_atomic_inc();
- spin_unlock(&se_tpg->acl_node_lock);
-
- spin_lock_irq(&se_nacl->device_list_lock);
- for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
- deve = &se_nacl->device_list[j];
- if (!(deve->lun_flags &
- TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
- (!deve->se_lun))
- continue;
- lun = deve->se_lun;
- if (!lun->lun_se_dev)
- continue;
-
- seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u"
- " %u %s\n",
- /* scsiInstIndex */
- (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
- TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
- 0,
- /* scsiDeviceIndex */
- lun->lun_se_dev->dev_index,
- /* scsiAuthIntrTgtPortIndex */
- TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
- /* scsiAuthIntrIndex */
- se_nacl->acl_index,
- /* scsiAuthIntrDevOrPort */
- 1,
- /* scsiAuthIntrName */
- se_nacl->initiatorname[0] ?
- se_nacl->initiatorname : NONE,
- /* FIXME: scsiAuthIntrLunMapIndex */
- 0,
- /* scsiAuthIntrAttachedTimes */
- deve->attach_count,
- /* scsiAuthIntrOutCommands */
- deve->total_cmds,
- /* scsiAuthIntrReadMegaBytes */
- (u32)(deve->read_bytes >> 20),
- /* scsiAuthIntrWrittenMegaBytes */
- (u32)(deve->write_bytes >> 20),
- /* FIXME: scsiAuthIntrHSOutCommands */
- 0,
- /* scsiAuthIntrLastCreation */
- (u32)(((u32)deve->creation_time -
- INITIAL_JIFFIES) * 100 / HZ),
- /* FIXME: scsiAuthIntrRowStatus */
- "Ready");
- }
- spin_unlock_irq(&se_nacl->device_list_lock);
-
- spin_lock(&se_tpg->acl_node_lock);
- atomic_dec(&se_nacl->mib_ref_count);
- smp_mb__after_atomic_dec();
- }
- spin_unlock(&se_tpg->acl_node_lock);
-
- return 0;
-}
-
-static const struct seq_operations scsi_auth_intr_seq_ops = {
- .start = scsi_auth_intr_seq_start,
- .next = scsi_auth_intr_seq_next,
- .stop = scsi_auth_intr_seq_stop,
- .show = scsi_auth_intr_seq_show
-};
-
-static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &scsi_auth_intr_seq_ops);
-}
-
-static const struct file_operations scsi_auth_intr_seq_fops = {
- .owner = THIS_MODULE,
- .open = scsi_auth_intr_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-/*
- * SCSI Attached Initiator Port Table:
- * It lists the SCSI Initiators attached to one of the local Target ports.
- * Iterates through all active TPGs and use active sessions from each TPG
- * to list the info fo this table.
- */
-static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos)
-{
- spin_lock_bh(&se_global->se_tpg_lock);
- return seq_list_start(&se_global->g_se_tpg_list, *pos);
-}
-
-static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v,
- loff_t *pos)
-{
- return seq_list_next(v, &se_global->g_se_tpg_list, pos);
-}
-
-static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v)
-{
- spin_unlock_bh(&se_global->se_tpg_lock);
-}
-
-static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v)
-{
- struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
- se_tpg_list);
- struct se_dev_entry *deve;
- struct se_lun *lun;
- struct se_node_acl *se_nacl;
- struct se_session *se_sess;
- unsigned char buf[64];
- int j;
-
- if (list_is_first(&se_tpg->se_tpg_list,
- &se_global->g_se_tpg_list))
- seq_puts(seq, "inst dev port indx port_auth_indx port_name"
- " port_ident\n");
-
- if (!(se_tpg))
- return 0;
-
- spin_lock(&se_tpg->session_lock);
- list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
- if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) ||
- (!se_sess->se_node_acl) ||
- (!se_sess->se_node_acl->device_list))
- continue;
-
- atomic_inc(&se_sess->mib_ref_count);
- smp_mb__after_atomic_inc();
- se_nacl = se_sess->se_node_acl;
- atomic_inc(&se_nacl->mib_ref_count);
- smp_mb__after_atomic_inc();
- spin_unlock(&se_tpg->session_lock);
-
- spin_lock_irq(&se_nacl->device_list_lock);
- for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
- deve = &se_nacl->device_list[j];
- if (!(deve->lun_flags &
- TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
- (!deve->se_lun))
- continue;
-
- lun = deve->se_lun;
- if (!lun->lun_se_dev)
- continue;
-
- memset(buf, 0, 64);
- if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL)
- TPG_TFO(se_tpg)->sess_get_initiator_sid(
- se_sess, (unsigned char *)&buf[0], 64);
-
- seq_printf(seq, "%u %u %u %u %u %s+i+%s\n",
- /* scsiInstIndex */
- (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
- TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
- 0,
- /* scsiDeviceIndex */
- lun->lun_se_dev->dev_index,
- /* scsiPortIndex */
- TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
- /* scsiAttIntrPortIndex */
- (TPG_TFO(se_tpg)->sess_get_index != NULL) ?
- TPG_TFO(se_tpg)->sess_get_index(se_sess) :
- 0,
- /* scsiAttIntrPortAuthIntrIdx */
- se_nacl->acl_index,
- /* scsiAttIntrPortName */
- se_nacl->initiatorname[0] ?
- se_nacl->initiatorname : NONE,
- /* scsiAttIntrPortIdentifier */
- buf);
- }
- spin_unlock_irq(&se_nacl->device_list_lock);
-
- spin_lock(&se_tpg->session_lock);
- atomic_dec(&se_nacl->mib_ref_count);
- smp_mb__after_atomic_dec();
- atomic_dec(&se_sess->mib_ref_count);
- smp_mb__after_atomic_dec();
- }
- spin_unlock(&se_tpg->session_lock);
-
- return 0;
-}
-
-static const struct seq_operations scsi_att_intr_port_seq_ops = {
- .start = scsi_att_intr_port_seq_start,
- .next = scsi_att_intr_port_seq_next,
- .stop = scsi_att_intr_port_seq_stop,
- .show = scsi_att_intr_port_seq_show
-};
-
-static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &scsi_att_intr_port_seq_ops);
-}
-
-static const struct file_operations scsi_att_intr_port_seq_fops = {
- .owner = THIS_MODULE,
- .open = scsi_att_intr_port_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-/*
- * SCSI Logical Unit Table
- */
-static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos)
-{
- return locate_hba_start(seq, pos);
-}
-
-static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_lu_seq_stop(struct seq_file *seq, void *v)
-{
- locate_hba_stop(seq, v);
-}
-
-#define SCSI_LU_INDEX 1
-static int scsi_lu_seq_show(struct seq_file *seq, void *v)
-{
- struct se_hba *hba;
- struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
- g_se_dev_list);
- struct se_device *dev = se_dev->se_dev_ptr;
- int j;
- char str[28];
-
- if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
- seq_puts(seq, "inst dev indx LUN lu_name vend prod rev"
- " dev_type status state-bit num_cmds read_mbytes"
- " write_mbytes resets full_stat hs_num_cmds creation_time\n");
-
- if (!(dev))
- return 0;
-
- hba = dev->se_hba;
- if (!(hba)) {
- /* Log error ? */
- return 0;
- }
-
- /* Fix LU state, if we can read it from the device */
- seq_printf(seq, "%u %u %u %llu %s", hba->hba_index,
- dev->dev_index, SCSI_LU_INDEX,
- (unsigned long long)0, /* FIXME: scsiLuDefaultLun */
- (strlen(DEV_T10_WWN(dev)->unit_serial)) ?
- /* scsiLuWwnName */
- (char *)&DEV_T10_WWN(dev)->unit_serial[0] :
- "None");
-
- memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
- /* scsiLuVendorId */
- for (j = 0; j < 8; j++)
- str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
- DEV_T10_WWN(dev)->vendor[j] : 0x20;
- str[8] = 0;
- seq_printf(seq, " %s", str);
-
- /* scsiLuProductId */
- for (j = 0; j < 16; j++)
- str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
- DEV_T10_WWN(dev)->model[j] : 0x20;
- str[16] = 0;
- seq_printf(seq, " %s", str);
-
- /* scsiLuRevisionId */
- for (j = 0; j < 4; j++)
- str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
- DEV_T10_WWN(dev)->revision[j] : 0x20;
- str[4] = 0;
- seq_printf(seq, " %s", str);
-
- seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n",
- /* scsiLuPeripheralType */
- TRANSPORT(dev)->get_device_type(dev),
- (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
- "available" : "notavailable", /* scsiLuStatus */
- "exposed", /* scsiLuState */
- (unsigned long long)dev->num_cmds,
- /* scsiLuReadMegaBytes */
- (u32)(dev->read_bytes >> 20),
- /* scsiLuWrittenMegaBytes */
- (u32)(dev->write_bytes >> 20),
- dev->num_resets, /* scsiLuInResets */
- 0, /* scsiLuOutTaskSetFullStatus */
- 0, /* scsiLuHSInCommands */
- (u32)(((u32)dev->creation_time - INITIAL_JIFFIES) *
- 100 / HZ));
-
- return 0;
-}
-
-static const struct seq_operations scsi_lu_seq_ops = {
- .start = scsi_lu_seq_start,
- .next = scsi_lu_seq_next,
- .stop = scsi_lu_seq_stop,
- .show = scsi_lu_seq_show
-};
-
-static int scsi_lu_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &scsi_lu_seq_ops);
-}
-
-static const struct file_operations scsi_lu_seq_fops = {
- .owner = THIS_MODULE,
- .open = scsi_lu_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-/****************************************************************************/
-
-/*
- * Remove proc fs entries
- */
-void remove_scsi_target_mib(void)
-{
- remove_proc_entry("scsi_target/mib/scsi_inst", NULL);
- remove_proc_entry("scsi_target/mib/scsi_dev", NULL);
- remove_proc_entry("scsi_target/mib/scsi_port", NULL);
- remove_proc_entry("scsi_target/mib/scsi_transport", NULL);
- remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL);
- remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL);
- remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL);
- remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL);
- remove_proc_entry("scsi_target/mib/scsi_lu", NULL);
- remove_proc_entry("scsi_target/mib", NULL);
-}
-
-/*
- * Create proc fs entries for the mib tables
- */
-int init_scsi_target_mib(void)
-{
- struct proc_dir_entry *dir_entry;
- struct proc_dir_entry *scsi_inst_entry;
- struct proc_dir_entry *scsi_dev_entry;
- struct proc_dir_entry *scsi_port_entry;
- struct proc_dir_entry *scsi_transport_entry;
- struct proc_dir_entry *scsi_tgt_dev_entry;
- struct proc_dir_entry *scsi_tgt_port_entry;
- struct proc_dir_entry *scsi_auth_intr_entry;
- struct proc_dir_entry *scsi_att_intr_port_entry;
- struct proc_dir_entry *scsi_lu_entry;
-
- dir_entry = proc_mkdir("scsi_target/mib", NULL);
- if (!(dir_entry)) {
- printk(KERN_ERR "proc_mkdir() failed.\n");
- return -1;
- }
-
- scsi_inst_entry =
- create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL);
- if (scsi_inst_entry)
- scsi_inst_entry->proc_fops = &scsi_inst_seq_fops;
- else
- goto error;
-
- scsi_dev_entry =
- create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL);
- if (scsi_dev_entry)
- scsi_dev_entry->proc_fops = &scsi_dev_seq_fops;
- else
- goto error;
-
- scsi_port_entry =
- create_proc_entry("scsi_target/mib/scsi_port", 0, NULL);
- if (scsi_port_entry)
- scsi_port_entry->proc_fops = &scsi_port_seq_fops;
- else
- goto error;
-
- scsi_transport_entry =
- create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL);
- if (scsi_transport_entry)
- scsi_transport_entry->proc_fops = &scsi_transport_seq_fops;
- else
- goto error;
-
- scsi_tgt_dev_entry =
- create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL);
- if (scsi_tgt_dev_entry)
- scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops;
- else
- goto error;
-
- scsi_tgt_port_entry =
- create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL);
- if (scsi_tgt_port_entry)
- scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops;
- else
- goto error;
-
- scsi_auth_intr_entry =
- create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL);
- if (scsi_auth_intr_entry)
- scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops;
- else
- goto error;
-
- scsi_att_intr_port_entry =
- create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL);
- if (scsi_att_intr_port_entry)
- scsi_att_intr_port_entry->proc_fops =
- &scsi_att_intr_port_seq_fops;
- else
- goto error;
-
- scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL);
- if (scsi_lu_entry)
- scsi_lu_entry->proc_fops = &scsi_lu_seq_fops;
- else
- goto error;
-
- return 0;
-
-error:
- printk(KERN_ERR "create_proc_entry() failed.\n");
- remove_scsi_target_mib();
- return -1;
-}
-
-/*
- * Initialize the index table for allocating unique row indexes to various mib
- * tables
- */
-void init_scsi_index_table(void)
-{
- memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
- spin_lock_init(&scsi_index_table.lock);
-}
-
-/*
- * Allocate a new row index for the entry type specified
- */
-u32 scsi_get_new_index(scsi_index_t type)
-{
- u32 new_index;
-
- if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
- printk(KERN_ERR "Invalid index type %d\n", type);
- return -1;
- }
-
- spin_lock(&scsi_index_table.lock);
- new_index = ++scsi_index_table.scsi_mib_index[type];
- if (new_index == 0)
- new_index = ++scsi_index_table.scsi_mib_index[type];
- spin_unlock(&scsi_index_table.lock);
-
- return new_index;
-}
-EXPORT_SYMBOL(scsi_get_new_index);
diff --git a/drivers/target/target_core_mib.h b/drivers/target/target_core_mib.h
deleted file mode 100644
index 2772046..0000000
--- a/drivers/target/target_core_mib.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef TARGET_CORE_MIB_H
-#define TARGET_CORE_MIB_H
-
-typedef enum {
- SCSI_INST_INDEX,
- SCSI_DEVICE_INDEX,
- SCSI_AUTH_INTR_INDEX,
- SCSI_INDEX_TYPE_MAX
-} scsi_index_t;
-
-struct scsi_index_table {
- spinlock_t lock;
- u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
-} ____cacheline_aligned;
-
-/* SCSI Port stats */
-struct scsi_port_stats {
- u64 cmd_pdus;
- u64 tx_data_octets;
- u64 rx_data_octets;
-} ____cacheline_aligned;
-
-extern int init_scsi_target_mib(void);
-extern void remove_scsi_target_mib(void);
-extern void init_scsi_index_table(void);
-extern u32 scsi_get_new_index(scsi_index_t);
-
-#endif /*** TARGET_CORE_MIB_H ***/
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 742d246..f2a0847 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -462,8 +462,8 @@ static struct se_device *pscsi_create_type_disk(
*/
bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
- if (!(bd)) {
- printk("pSCSI: blkdev_get_by_path() failed\n");
+ if (IS_ERR(bd)) {
+ printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
scsi_device_put(sd);
return NULL;
}
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index abfa81a..c26f674 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -275,7 +275,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
spin_lock_init(&acl->device_list_lock);
spin_lock_init(&acl->nacl_sess_lock);
atomic_set(&acl->acl_pr_ref_count, 0);
- atomic_set(&acl->mib_ref_count, 0);
acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
@@ -318,12 +317,6 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
cpu_relax();
}
-void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl)
-{
- while (atomic_read(&nacl->mib_ref_count) != 0)
- cpu_relax();
-}
-
void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
int i, ret;
@@ -480,7 +473,6 @@ int core_tpg_del_initiator_node_acl(
spin_unlock_bh(&tpg->session_lock);
core_tpg_wait_for_nacl_pr_ref(acl);
- core_tpg_wait_for_mib_ref(acl);
core_clear_initiator_node_from_tpg(acl, tpg);
core_free_device_list_for_node(acl, tpg);
@@ -701,6 +693,8 @@ EXPORT_SYMBOL(core_tpg_register);
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
+ struct se_node_acl *nacl, *nacl_tmp;
+
printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
" for endpoint: %s Portal Tag %u\n",
(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
@@ -714,6 +708,25 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
cpu_relax();
+ /*
+ * Release any remaining demo-mode generated se_node_acl that have
+ * not been released because of TFO->tpg_check_demo_mode_cache() == 1
+ * in transport_deregister_session().
+ */
+ spin_lock_bh(&se_tpg->acl_node_lock);
+ list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
+ acl_list) {
+ list_del(&nacl->acl_list);
+ se_tpg->num_node_acls--;
+ spin_unlock_bh(&se_tpg->acl_node_lock);
+
+ core_tpg_wait_for_nacl_pr_ref(nacl);
+ core_free_device_list_for_node(nacl, se_tpg);
+ TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);
+
+ spin_lock_bh(&se_tpg->acl_node_lock);
+ }
+ spin_unlock_bh(&se_tpg->acl_node_lock);
if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
core_tpg_release_virtual_lun0(se_tpg);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 28b6292..236e22d 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -379,6 +379,40 @@ void release_se_global(void)
se_global = NULL;
}
+/* SCSI statistics table index */
+static struct scsi_index_table scsi_index_table;
+
+/*
+ * Initialize the index table for allocating unique row indexes to various mib
+ * tables.
+ */
+void init_scsi_index_table(void)
+{
+ memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
+ spin_lock_init(&scsi_index_table.lock);
+}
+
+/*
+ * Allocate a new row index for the entry type specified
+ */
+u32 scsi_get_new_index(scsi_index_t type)
+{
+ u32 new_index;
+
+ if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
+ printk(KERN_ERR "Invalid index type %d\n", type);
+ return -EINVAL;
+ }
+
+ spin_lock(&scsi_index_table.lock);
+ new_index = ++scsi_index_table.scsi_mib_index[type];
+ if (new_index == 0)
+ new_index = ++scsi_index_table.scsi_mib_index[type];
+ spin_unlock(&scsi_index_table.lock);
+
+ return new_index;
+}
+
void transport_init_queue_obj(struct se_queue_obj *qobj)
{
atomic_set(&qobj->queue_cnt, 0);
@@ -437,7 +471,6 @@ struct se_session *transport_init_session(void)
}
INIT_LIST_HEAD(&se_sess->sess_list);
INIT_LIST_HEAD(&se_sess->sess_acl_list);
- atomic_set(&se_sess->mib_ref_count, 0);
return se_sess;
}
@@ -546,12 +579,6 @@ void transport_deregister_session(struct se_session *se_sess)
transport_free_session(se_sess);
return;
}
- /*
- * Wait for possible reference in drivers/target/target_core_mib.c:
- * scsi_att_intr_port_seq_show()
- */
- while (atomic_read(&se_sess->mib_ref_count) != 0)
- cpu_relax();
spin_lock_bh(&se_tpg->session_lock);
list_del(&se_sess->sess_list);
@@ -574,7 +601,6 @@ void transport_deregister_session(struct se_session *se_sess)
spin_unlock_bh(&se_tpg->acl_node_lock);
core_tpg_wait_for_nacl_pr_ref(se_nacl);
- core_tpg_wait_for_mib_ref(se_nacl);
core_free_device_list_for_node(se_nacl, se_tpg);
TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
se_nacl);
@@ -4827,6 +4853,8 @@ static int transport_do_se_mem_map(
return ret;
}
+
+ BUG_ON(list_empty(se_mem_list));
/*
* This is the normal path for all normal non BIDI and BIDI-COMMAND
* WRITE payloads.. If we need to do BIDI READ passthrough for
@@ -5008,7 +5036,9 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
u32 se_mem_cnt = 0, task_offset = 0;
- BUG_ON(list_empty(cmd->t_task->t_mem_list));
+ if (!list_empty(T_TASK(cmd)->t_mem_list))
+ se_mem = list_entry(T_TASK(cmd)->t_mem_list->next,
+ struct se_mem, se_list);
ret = transport_do_se_mem_map(dev, task,
cmd->t_task->t_mem_list, NULL, se_mem,
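The scsi_get_new_index() helper moved above increments a per-type counter under a spinlock and skips zero when the 32-bit counter wraps. A minimal standalone sketch of that allocation pattern, with a pthread mutex standing in for the kernel spinlock (all names here are illustrative, not from the patch):

/*
 * Standalone sketch of the wrap-skipping index allocator; a pthread
 * mutex stands in for the kernel spinlock and all names are illustrative.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

enum index_type { INST_INDEX, DEVICE_INDEX, INDEX_TYPE_MAX };

static struct {
	pthread_mutex_t lock;
	uint32_t counter[INDEX_TYPE_MAX];
} index_table = { .lock = PTHREAD_MUTEX_INITIALIZER };

static uint32_t get_new_index(enum index_type type)
{
	uint32_t new_index;

	pthread_mutex_lock(&index_table.lock);
	new_index = ++index_table.counter[type];
	if (new_index == 0)		/* skip 0 after a 32-bit wrap */
		new_index = ++index_table.counter[type];
	pthread_mutex_unlock(&index_table.lock);

	return new_index;
}

int main(void)
{
	printf("%u %u\n", get_new_index(INST_INDEX), get_new_index(DEVICE_INDEX));
	return 0;
}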
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index f7a5dba..bf7c687 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -4,7 +4,6 @@
menuconfig THERMAL
tristate "Generic Thermal sysfs driver"
- depends on NET
help
Generic Thermal Sysfs driver offers a generic mechanism for
thermal management. Usually it's made up of one or more thermal
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 7d0e63c..713b7ea 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -62,20 +62,6 @@ static DEFINE_MUTEX(thermal_list_lock);
static unsigned int thermal_event_seqnum;
-static struct genl_family thermal_event_genl_family = {
- .id = GENL_ID_GENERATE,
- .name = THERMAL_GENL_FAMILY_NAME,
- .version = THERMAL_GENL_VERSION,
- .maxattr = THERMAL_GENL_ATTR_MAX,
-};
-
-static struct genl_multicast_group thermal_event_mcgrp = {
- .name = THERMAL_GENL_MCAST_GROUP_NAME,
-};
-
-static int genetlink_init(void);
-static void genetlink_exit(void);
-
static int get_idr(struct idr *idr, struct mutex *lock, int *id)
{
int err;
@@ -1225,6 +1211,18 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
EXPORT_SYMBOL(thermal_zone_device_unregister);
+#ifdef CONFIG_NET
+static struct genl_family thermal_event_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .name = THERMAL_GENL_FAMILY_NAME,
+ .version = THERMAL_GENL_VERSION,
+ .maxattr = THERMAL_GENL_ATTR_MAX,
+};
+
+static struct genl_multicast_group thermal_event_mcgrp = {
+ .name = THERMAL_GENL_MCAST_GROUP_NAME,
+};
+
int generate_netlink_event(u32 orig, enum events event)
{
struct sk_buff *skb;
@@ -1301,6 +1299,15 @@ static int genetlink_init(void)
return result;
}
+static void genetlink_exit(void)
+{
+ genl_unregister_family(&thermal_event_genl_family);
+}
+#else /* !CONFIG_NET */
+static inline int genetlink_init(void) { return 0; }
+static inline void genetlink_exit(void) {}
+#endif /* !CONFIG_NET */
+
static int __init thermal_init(void)
{
int result = 0;
@@ -1316,11 +1323,6 @@ static int __init thermal_init(void)
return result;
}
-static void genetlink_exit(void)
-{
- genl_unregister_family(&thermal_event_genl_family);
-}
-
static void __exit thermal_exit(void)
{
class_unregister(&thermal_class);
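The thermal_sys.c change moves the generic netlink pieces behind CONFIG_NET and gives the !CONFIG_NET case static inline no-op stubs, so thermal_init()/thermal_exit() need no #ifdefs of their own. A standalone sketch of that compile-out pattern, with a made-up FEATURE_FOO macro standing in for the config option:

/*
 * Sketch of the compile-out pattern: when the optional facility is not
 * configured, callers link against static inline no-op stubs instead of
 * sprouting #ifdefs themselves.  FEATURE_FOO and the names are made up.
 */
#include <stdio.h>

#ifdef FEATURE_FOO
static int foo_init(void)
{
	puts("optional facility initialised");
	return 0;
}

static void foo_exit(void)
{
	puts("optional facility torn down");
}
#else  /* !FEATURE_FOO */
static inline int foo_init(void) { return 0; }
static inline void foo_exit(void) { }
#endif /* !FEATURE_FOO */

int main(void)
{
	if (foo_init())
		return 1;
	/* ... unconditional driver work ... */
	foo_exit();
	return 0;
}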
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 0dae46f..57731e8 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -96,6 +96,22 @@ static struct vendor_data vendor_st = {
};
/* Deals with DMA transactions */
+
+struct pl011_sgbuf {
+ struct scatterlist sg;
+ char *buf;
+};
+
+struct pl011_dmarx_data {
+ struct dma_chan *chan;
+ struct completion complete;
+ bool use_buf_b;
+ struct pl011_sgbuf sgbuf_a;
+ struct pl011_sgbuf sgbuf_b;
+ dma_cookie_t cookie;
+ bool running;
+};
+
struct pl011_dmatx_data {
struct dma_chan *chan;
struct scatterlist sg;
@@ -120,12 +136,70 @@ struct uart_amba_port {
char type[12];
#ifdef CONFIG_DMA_ENGINE
/* DMA stuff */
- bool using_dma;
+ bool using_tx_dma;
+ bool using_rx_dma;
+ struct pl011_dmarx_data dmarx;
struct pl011_dmatx_data dmatx;
#endif
};
/*
+ * Reads up to 256 characters from the FIFO or until it's empty and
+ * inserts them into the TTY layer. Returns the number of characters
+ * read from the FIFO.
+ */
+static int pl011_fifo_to_tty(struct uart_amba_port *uap)
+{
+ u16 status, ch;
+ unsigned int flag, max_count = 256;
+ int fifotaken = 0;
+
+ while (max_count--) {
+ status = readw(uap->port.membase + UART01x_FR);
+ if (status & UART01x_FR_RXFE)
+ break;
+
+ /* Take chars from the FIFO and update status */
+ ch = readw(uap->port.membase + UART01x_DR) |
+ UART_DUMMY_DR_RX;
+ flag = TTY_NORMAL;
+ uap->port.icount.rx++;
+ fifotaken++;
+
+ if (unlikely(ch & UART_DR_ERROR)) {
+ if (ch & UART011_DR_BE) {
+ ch &= ~(UART011_DR_FE | UART011_DR_PE);
+ uap->port.icount.brk++;
+ if (uart_handle_break(&uap->port))
+ continue;
+ } else if (ch & UART011_DR_PE)
+ uap->port.icount.parity++;
+ else if (ch & UART011_DR_FE)
+ uap->port.icount.frame++;
+ if (ch & UART011_DR_OE)
+ uap->port.icount.overrun++;
+
+ ch &= uap->port.read_status_mask;
+
+ if (ch & UART011_DR_BE)
+ flag = TTY_BREAK;
+ else if (ch & UART011_DR_PE)
+ flag = TTY_PARITY;
+ else if (ch & UART011_DR_FE)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(&uap->port, ch & 255))
+ continue;
+
+ uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
+ }
+
+ return fifotaken;
+}
+
+
+/*
* All the DMA operation mode stuff goes inside this ifdef.
* This assumes that you have a generic DMA device interface,
* no custom DMA interfaces are supported.
@@ -134,6 +208,31 @@ struct uart_amba_port {
#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
+static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
+ enum dma_data_direction dir)
+{
+ sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
+ if (!sg->buf)
+ return -ENOMEM;
+
+ sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE);
+
+ if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) {
+ kfree(sg->buf);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
+ enum dma_data_direction dir)
+{
+ if (sg->buf) {
+ dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir);
+ kfree(sg->buf);
+ }
+}
+
static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
{
/* DMA is the sole user of the platform data right now */
@@ -153,7 +252,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
return;
}
- /* Try to acquire a generic DMA engine slave channel */
+ /* Try to acquire a generic DMA engine slave TX channel */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -168,6 +267,28 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
dev_info(uap->port.dev, "DMA channel TX %s\n",
dma_chan_name(uap->dmatx.chan));
+
+ /* Optionally make use of an RX channel as well */
+ if (plat->dma_rx_param) {
+ struct dma_slave_config rx_conf = {
+ .src_addr = uap->port.mapbase + UART01x_DR,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .direction = DMA_FROM_DEVICE,
+ .src_maxburst = uap->fifosize >> 1,
+ };
+
+ chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
+ if (!chan) {
+ dev_err(uap->port.dev, "no RX DMA channel!\n");
+ return;
+ }
+
+ dmaengine_slave_config(chan, &rx_conf);
+ uap->dmarx.chan = chan;
+
+ dev_info(uap->port.dev, "DMA channel RX %s\n",
+ dma_chan_name(uap->dmarx.chan));
+ }
}
#ifndef MODULE
@@ -219,9 +340,10 @@ static void pl011_dma_remove(struct uart_amba_port *uap)
/* TODO: remove the initcall if it has not yet executed */
if (uap->dmatx.chan)
dma_release_channel(uap->dmatx.chan);
+ if (uap->dmarx.chan)
+ dma_release_channel(uap->dmarx.chan);
}
-
/* Forward declare this for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);
@@ -380,7 +502,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
*/
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
- if (!uap->using_dma)
+ if (!uap->using_tx_dma)
return false;
/*
@@ -432,7 +554,7 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
u16 dmacr;
- if (!uap->using_dma)
+ if (!uap->using_tx_dma)
return false;
if (!uap->port.x_char) {
@@ -492,7 +614,7 @@ static void pl011_dma_flush_buffer(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
- if (!uap->using_dma)
+ if (!uap->using_tx_dma)
return;
/* Avoid deadlock with the DMA engine callback */
@@ -508,9 +630,219 @@ static void pl011_dma_flush_buffer(struct uart_port *port)
}
}
+static void pl011_dma_rx_callback(void *data);
+
+static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
+{
+ struct dma_chan *rxchan = uap->dmarx.chan;
+ struct dma_device *dma_dev;
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ struct dma_async_tx_descriptor *desc;
+ struct pl011_sgbuf *sgbuf;
+
+ if (!rxchan)
+ return -EIO;
+
+ /* Start the RX DMA job */
+ sgbuf = uap->dmarx.use_buf_b ?
+ &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+ dma_dev = rxchan->device;
+ desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
+ DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ /*
+ * If the DMA engine is busy and cannot prepare a
+ * channel, no big deal, the driver will fall back
+ * to interrupt mode as a result of this error code.
+ */
+ if (!desc) {
+ uap->dmarx.running = false;
+ dmaengine_terminate_all(rxchan);
+ return -EBUSY;
+ }
+
+ /* Some data to go along to the callback */
+ desc->callback = pl011_dma_rx_callback;
+ desc->callback_param = uap;
+ dmarx->cookie = dmaengine_submit(desc);
+ dma_async_issue_pending(rxchan);
+
+ uap->dmacr |= UART011_RXDMAE;
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+ uap->dmarx.running = true;
+
+ uap->im &= ~UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+
+ return 0;
+}
+
+/*
+ * This is called when either the DMA job is complete, or
+ * the FIFO timeout interrupt occurred. This must be called
+ * with the port spinlock uap->port.lock held.
+ */
+static void pl011_dma_rx_chars(struct uart_amba_port *uap,
+ u32 pending, bool use_buf_b,
+ bool readfifo)
+{
+ struct tty_struct *tty = uap->port.state->port.tty;
+ struct pl011_sgbuf *sgbuf = use_buf_b ?
+ &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+ struct device *dev = uap->dmarx.chan->device->dev;
+ int dma_count = 0;
+ u32 fifotaken = 0; /* only used for vdbg() */
+
+ /* Pick everything from the DMA first */
+ if (pending) {
+ /* Sync in buffer */
+ dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+
+ /*
+ * First take all chars in the DMA pipe, then look in the FIFO.
+ * Note that tty_insert_flip_buf() tries to take as many chars
+ * as it can.
+ */
+ dma_count = tty_insert_flip_string(uap->port.state->port.tty,
+ sgbuf->buf, pending);
+
+ /* Return buffer to device */
+ dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+
+ uap->port.icount.rx += dma_count;
+ if (dma_count < pending)
+ dev_warn(uap->port.dev,
+ "couldn't insert all characters (TTY is full?)\n");
+ }
+
+ /*
+ * Only continue with trying to read the FIFO if all DMA chars have
+ * been taken first.
+ */
+ if (dma_count == pending && readfifo) {
+ /* Clear any error flags */
+ writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
+ uap->port.membase + UART011_ICR);
+
+ /*
+ * If we read all the DMA'd characters, and we had an
+ * incomplete buffer, that could be due to an rx error, or
+ * maybe we just timed out. Read any pending chars and check
+ * the error status.
+ *
+ * Error conditions will only occur in the FIFO, these will
+ * trigger an immediate interrupt and stop the DMA job, so we
+ * will always find the error in the FIFO, never in the DMA
+ * buffer.
+ */
+ fifotaken = pl011_fifo_to_tty(uap);
+ }
+
+ spin_unlock(&uap->port.lock);
+ dev_vdbg(uap->port.dev,
+ "Took %d chars from DMA buffer and %d chars from the FIFO\n",
+ dma_count, fifotaken);
+ tty_flip_buffer_push(tty);
+ spin_lock(&uap->port.lock);
+}
+
+static void pl011_dma_rx_irq(struct uart_amba_port *uap)
+{
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ struct dma_chan *rxchan = dmarx->chan;
+ struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
+ &dmarx->sgbuf_b : &dmarx->sgbuf_a;
+ size_t pending;
+ struct dma_tx_state state;
+ enum dma_status dmastat;
+
+ /*
+ * Pause the transfer so we can trust the current counter,
+ * do this before we pause the PL011 block, else we may
+ * overflow the FIFO.
+ */
+ if (dmaengine_pause(rxchan))
+ dev_err(uap->port.dev, "unable to pause DMA transfer\n");
+ dmastat = rxchan->device->device_tx_status(rxchan,
+ dmarx->cookie, &state);
+ if (dmastat != DMA_PAUSED)
+ dev_err(uap->port.dev, "unable to pause DMA transfer\n");
+
+ /* Disable RX DMA - incoming data will wait in the FIFO */
+ uap->dmacr &= ~UART011_RXDMAE;
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+ uap->dmarx.running = false;
+
+ pending = sgbuf->sg.length - state.residue;
+ BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
+ /* Then we terminate the transfer - we now know our residue */
+ dmaengine_terminate_all(rxchan);
+
+ /*
+ * This will take the chars we have so far and insert
+ * into the framework.
+ */
+ pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
+
+ /* Switch buffer & re-trigger DMA job */
+ dmarx->use_buf_b = !dmarx->use_buf_b;
+ if (pl011_dma_rx_trigger_dma(uap)) {
+ dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
+ "fall back to interrupt mode\n");
+ uap->im |= UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ }
+}
+
+static void pl011_dma_rx_callback(void *data)
+{
+ struct uart_amba_port *uap = data;
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ bool lastbuf = dmarx->use_buf_b;
+ int ret;
+
+ /*
+ * This completion interrupt occurs typically when the
+ * RX buffer is totally stuffed but no timeout has yet
+ * occurred. When that happens, we just want the RX
+ * routine to flush out the secondary DMA buffer while
+ * we immediately trigger the next DMA job.
+ */
+ spin_lock_irq(&uap->port.lock);
+ uap->dmarx.running = false;
+ dmarx->use_buf_b = !lastbuf;
+ ret = pl011_dma_rx_trigger_dma(uap);
+
+ pl011_dma_rx_chars(uap, PL011_DMA_BUFFER_SIZE, lastbuf, false);
+ spin_unlock_irq(&uap->port.lock);
+ /*
+ * Do this check after we picked the DMA chars so we don't
+ * get some IRQ immediately from RX.
+ */
+ if (ret) {
+ dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
+ "fall back to interrupt mode\n");
+ uap->im |= UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ }
+}
+
+/*
+ * Stop accepting received characters, when we're shutting down or
+ * suspending this port.
+ * Locking: called with port lock held and IRQs disabled.
+ */
+static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
+{
+ /* FIXME. Just disable the DMA enable */
+ uap->dmacr &= ~UART011_RXDMAE;
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+}
static void pl011_dma_startup(struct uart_amba_port *uap)
{
+ int ret;
+
if (!uap->dmatx.chan)
return;
@@ -525,8 +857,33 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
/* The DMA buffer is now the FIFO the TTY subsystem can use */
uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
- uap->using_dma = true;
+ uap->using_tx_dma = true;
+
+ if (!uap->dmarx.chan)
+ goto skip_rx;
+
+ /* Allocate and map DMA RX buffers */
+ ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+ DMA_FROM_DEVICE);
+ if (ret) {
+ dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+ "RX buffer A", ret);
+ goto skip_rx;
+ }
+
+ ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
+ DMA_FROM_DEVICE);
+ if (ret) {
+ dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+ "RX buffer B", ret);
+ pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+ DMA_FROM_DEVICE);
+ goto skip_rx;
+ }
+ uap->using_rx_dma = true;
+
+skip_rx:
/* Turn on DMA error (RX/TX will be enabled on demand) */
uap->dmacr |= UART011_DMAONERR;
writew(uap->dmacr, uap->port.membase + UART011_DMACR);
@@ -539,11 +896,17 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
if (uap->vendor->dma_threshold)
writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
uap->port.membase + ST_UART011_DMAWM);
+
+ if (uap->using_rx_dma) {
+ if (pl011_dma_rx_trigger_dma(uap))
+ dev_dbg(uap->port.dev, "could not trigger initial "
+ "RX DMA job, fall back to interrupt mode\n");
+ }
}
static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
- if (!uap->using_dma)
+ if (!(uap->using_tx_dma || uap->using_rx_dma))
return;
/* Disable RX and TX DMA */
@@ -555,19 +918,39 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
writew(uap->dmacr, uap->port.membase + UART011_DMACR);
spin_unlock_irq(&uap->port.lock);
- /* In theory, this should already be done by pl011_dma_flush_buffer */
- dmaengine_terminate_all(uap->dmatx.chan);
- if (uap->dmatx.queued) {
- dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
- DMA_TO_DEVICE);
- uap->dmatx.queued = false;
+ if (uap->using_tx_dma) {
+ /* In theory, this should already be done by pl011_dma_flush_buffer */
+ dmaengine_terminate_all(uap->dmatx.chan);
+ if (uap->dmatx.queued) {
+ dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+ DMA_TO_DEVICE);
+ uap->dmatx.queued = false;
+ }
+
+ kfree(uap->dmatx.buf);
+ uap->using_tx_dma = false;
}
- kfree(uap->dmatx.buf);
+ if (uap->using_rx_dma) {
+ dmaengine_terminate_all(uap->dmarx.chan);
+ /* Clean up the RX DMA */
+ pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
+ pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+ uap->using_rx_dma = false;
+ }
+}
- uap->using_dma = false;
+static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
+{
+ return uap->using_rx_dma;
}
+static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
+{
+ return uap->using_rx_dma && uap->dmarx.running;
+}
+
+
#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_probe(struct uart_amba_port *uap)
@@ -600,6 +983,29 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
return false;
}
+static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
+{
+}
+
+static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
+{
+}
+
+static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
+{
+ return -EIO;
+}
+
+static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
+{
+ return false;
+}
+
+static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
+{
+ return false;
+}
+
#define pl011_dma_flush_buffer NULL
#endif
@@ -630,6 +1036,8 @@ static void pl011_stop_rx(struct uart_port *port)
uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
UART011_PEIM|UART011_BEIM|UART011_OEIM);
writew(uap->im, uap->port.membase + UART011_IMSC);
+
+ pl011_dma_rx_stop(uap);
}
static void pl011_enable_ms(struct uart_port *port)
@@ -643,51 +1051,24 @@ static void pl011_enable_ms(struct uart_port *port)
static void pl011_rx_chars(struct uart_amba_port *uap)
{
struct tty_struct *tty = uap->port.state->port.tty;
- unsigned int status, ch, flag, max_count = 256;
-
- status = readw(uap->port.membase + UART01x_FR);
- while ((status & UART01x_FR_RXFE) == 0 && max_count--) {
- ch = readw(uap->port.membase + UART01x_DR) | UART_DUMMY_DR_RX;
- flag = TTY_NORMAL;
- uap->port.icount.rx++;
-
- /*
- * Note that the error handling code is
- * out of the main execution path
- */
- if (unlikely(ch & UART_DR_ERROR)) {
- if (ch & UART011_DR_BE) {
- ch &= ~(UART011_DR_FE | UART011_DR_PE);
- uap->port.icount.brk++;
- if (uart_handle_break(&uap->port))
- goto ignore_char;
- } else if (ch & UART011_DR_PE)
- uap->port.icount.parity++;
- else if (ch & UART011_DR_FE)
- uap->port.icount.frame++;
- if (ch & UART011_DR_OE)
- uap->port.icount.overrun++;
-
- ch &= uap->port.read_status_mask;
-
- if (ch & UART011_DR_BE)
- flag = TTY_BREAK;
- else if (ch & UART011_DR_PE)
- flag = TTY_PARITY;
- else if (ch & UART011_DR_FE)
- flag = TTY_FRAME;
- }
- if (uart_handle_sysrq_char(&uap->port, ch & 255))
- goto ignore_char;
-
- uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
+ pl011_fifo_to_tty(uap);
- ignore_char:
- status = readw(uap->port.membase + UART01x_FR);
- }
spin_unlock(&uap->port.lock);
tty_flip_buffer_push(tty);
+ /*
+ * If we were temporarily out of DMA mode for a while,
+ * attempt to switch back to DMA mode again.
+ */
+ if (pl011_dma_rx_available(uap)) {
+ if (pl011_dma_rx_trigger_dma(uap)) {
+ dev_dbg(uap->port.dev, "could not trigger RX DMA job "
+ "fall back to interrupt mode again\n");
+ uap->im |= UART011_RXIM;
+ } else
+ uap->im &= ~UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ }
spin_lock(&uap->port.lock);
}
@@ -767,8 +1148,12 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
UART011_RXIS),
uap->port.membase + UART011_ICR);
- if (status & (UART011_RTIS|UART011_RXIS))
- pl011_rx_chars(uap);
+ if (status & (UART011_RTIS|UART011_RXIS)) {
+ if (pl011_dma_rx_running(uap))
+ pl011_dma_rx_irq(uap);
+ else
+ pl011_rx_chars(uap);
+ }
if (status & (UART011_DSRMIS|UART011_DCDMIS|
UART011_CTSMIS|UART011_RIMIS))
pl011_modem_status(uap);
@@ -945,10 +1330,14 @@ static int pl011_startup(struct uart_port *port)
pl011_dma_startup(uap);
/*
- * Finally, enable interrupts
+ * Finally, enable interrupts, only timeouts when using DMA
+ * if initial RX DMA job failed, start in interrupt mode
+ * as well.
*/
spin_lock_irq(&uap->port.lock);
- uap->im = UART011_RXIM | UART011_RTIM;
+ uap->im = UART011_RTIM;
+ if (!pl011_dma_rx_running(uap))
+ uap->im |= UART011_RXIM;
writew(uap->im, uap->port.membase + UART011_IMSC);
spin_unlock_irq(&uap->port.lock);
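The amba-pl011.c changes add double-buffered ("ping-pong") RX DMA: one sgbuf is filled by the DMA engine while the other is drained into the TTY, and the completion callback swaps buffers and retriggers before flushing. A user-space sketch of just the buffer-swap logic, with a stub standing in for the DMA transfer (all names illustrative):

/*
 * Sketch of the ping-pong RX buffering: one buffer is owned by the "DMA
 * engine" while the other is drained, then the roles swap.  dma_fill()
 * is a stub for a completed transfer; every name here is illustrative.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 64

struct rx_state {
	char buf_a[BUF_SIZE];
	char buf_b[BUF_SIZE];
	bool use_buf_b;			/* which buffer the next job fills */
};

/* Stand-in for the data a finished DMA job left in the active buffer. */
static size_t dma_fill(char *buf, size_t len, const char *src)
{
	size_t n = strnlen(src, len);

	memcpy(buf, src, n);
	return n;
}

static void rx_complete(struct rx_state *rx, const char *incoming)
{
	char *done = rx->use_buf_b ? rx->buf_b : rx->buf_a;
	size_t n = dma_fill(done, BUF_SIZE, incoming);

	/* Swap and (conceptually) retrigger before draining, mirroring
	 * pl011_dma_rx_callback() restarting DMA ahead of the flush. */
	rx->use_buf_b = !rx->use_buf_b;
	printf("drained %zu bytes: %.*s\n", n, (int)n, done);
}

int main(void)
{
	struct rx_state rx = { .use_buf_b = false };

	rx_complete(&rx, "first chunk");
	rx_complete(&rx, "second chunk");
	return 0;
}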
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index beb1afa..7b951ad 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -601,7 +601,7 @@ static int max3100_startup(struct uart_port *port)
s->rts = 0;
sprintf(b, "max3100-%d", s->minor);
- s->workqueue = create_freezeable_workqueue(b);
+ s->workqueue = create_freezable_workqueue(b);
if (!s->workqueue) {
dev_warn(&s->spi->dev, "cannot create workqueue\n");
return -EBUSY;
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c
index 910870e..750b4f6 100644
--- a/drivers/tty/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
@@ -833,7 +833,7 @@ static int max3107_startup(struct uart_port *port)
struct max3107_port *s = container_of(port, struct max3107_port, port);
/* Initialize work queue */
- s->workqueue = create_freezeable_workqueue("max3107");
+ s->workqueue = create_freezable_workqueue("max3107");
if (!s->workqueue) {
dev_err(&s->spi->dev, "Workqueue creation failed\n");
return -EBUSY;
diff --git a/drivers/tty/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c
index 93760b2..1ef4df9 100644
--- a/drivers/tty/serial/serial_cs.c
+++ b/drivers/tty/serial/serial_cs.c
@@ -712,6 +712,7 @@ static struct pcmcia_device_id serial_ids[] = {
PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01),
PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05),
PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562),
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d041c68..0f299b7 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2681,17 +2681,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
mutex_lock(&usb_address0_mutex);
- if (!udev->config && oldspeed == USB_SPEED_SUPER) {
- /* Don't reset USB 3.0 devices during an initial setup */
- usb_set_device_state(udev, USB_STATE_DEFAULT);
- } else {
- /* Reset the device; full speed may morph to high speed */
- /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
- retval = hub_port_reset(hub, port1, udev, delay);
- if (retval < 0) /* error or disconnect */
- goto fail;
- /* success, speed is known */
- }
+ /* Reset the device; full speed may morph to high speed */
+ /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+ retval = hub_port_reset(hub, port1, udev, delay);
+ if (retval < 0) /* error or disconnect */
+ goto fail;
+ /* success, speed is known */
+
retval = -ENODEV;
if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 44c5954..81ce6a8 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -48,6 +48,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x04b4, 0x0526), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* Samsung Android phone modem - ID conflict with SPH-I500 */
+ { USB_DEVICE(0x04e8, 0x6601), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* Roland SC-8820 */
{ USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -68,6 +72,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Keytouch QWERTY Panel keyboard */
+ { USB_DEVICE(0x0926, 0x3333), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 3c6e1a0..5e14950 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -346,14 +346,19 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
if (unlikely(!skb))
break;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
- req->actual);
- page = NULL;
- if (req->actual < req->length) { /* Last fragment */
+ if (skb->len == 0) { /* First fragment */
skb->protocol = htons(ETH_P_PHONET);
skb_reset_mac_header(skb);
- pskb_pull(skb, 1);
+ /* Can't use pskb_pull() on page in IRQ */
+ memcpy(skb_put(skb, 1), page_address(page), 1);
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ skb->len == 0, req->actual);
+ page = NULL;
+
+ if (req->actual < req->length) { /* Last fragment */
skb->dev = dev;
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index e8f4f36..a6f21b8 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -29,6 +29,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
/**
* ehci_xilinx_of_setup - Initialize the device for ehci_reset()
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index fcbf4ab..0231814 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -169,9 +169,10 @@ static void xhci_print_ports(struct xhci_hcd *xhci)
}
}
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
{
- void *addr;
+ struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
+ void __iomem *addr;
u32 temp;
u64 temp_64;
@@ -449,7 +450,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
}
}
-void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
{
/* Fields are 32 bits wide, DMA addresses are in bytes */
int field_size = 32 / 8;
@@ -488,7 +489,7 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
}
-void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx,
unsigned int last_ep)
{
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 1d0f45f..a953439 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -307,7 +307,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
/***************** Streams structures manipulation *************************/
-void xhci_free_stream_ctx(struct xhci_hcd *xhci,
+static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
@@ -335,7 +335,7 @@ void xhci_free_stream_ctx(struct xhci_hcd *xhci,
* The stream context array must be a power of 2, and can be as small as
* 64 bytes or as large as 1MB.
*/
-struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
+static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs, dma_addr_t *dma,
gfp_t mem_flags)
{
@@ -1900,11 +1900,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
val &= DBOFF_MASK;
xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
" from cap regs base addr\n", val);
- xhci->dba = (void *) xhci->cap_regs + val;
+ xhci->dba = (void __iomem *) xhci->cap_regs + val;
xhci_dbg_regs(xhci);
xhci_print_run_regs(xhci);
/* Set ir_set to interrupt register set 0 */
- xhci->ir_set = (void *) xhci->run_regs->ir_set;
+ xhci->ir_set = &xhci->run_regs->ir_set[0];
/*
* Event ring setup: Allocate a normal ring, but also setup
@@ -1961,7 +1961,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
/* Set the event ring dequeue address */
xhci_set_hc_event_deq(xhci);
xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
/*
* XXX: Might need to set the Interrupter Moderation Register to
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 3e8211c..3289bf4 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -474,8 +474,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
state->new_deq_seg = find_trb_seg(cur_td->start_seg,
dev->eps[ep_index].stopped_trb,
&state->new_cycle_state);
- if (!state->new_deq_seg)
- BUG();
+ if (!state->new_deq_seg) {
+ WARN_ON(1);
+ return;
+ }
+
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg(xhci, "Finding endpoint context\n");
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
@@ -486,8 +489,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
state->new_deq_seg = find_trb_seg(state->new_deq_seg,
state->new_deq_ptr,
&state->new_cycle_state);
- if (!state->new_deq_seg)
- BUG();
+ if (!state->new_deq_seg) {
+ WARN_ON(1);
+ return;
+ }
trb = &state->new_deq_ptr->generic;
if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
@@ -2363,12 +2368,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
/* Scatter gather list entries may cross 64KB boundaries */
running_total = TRB_MAX_BUFF_SIZE -
- (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
if (running_total != 0)
num_trbs++;
/* How many more 64KB chunks to transfer, how many more TRBs? */
- while (running_total < sg_dma_len(sg)) {
+ while (running_total < sg_dma_len(sg) && running_total < temp) {
num_trbs++;
running_total += TRB_MAX_BUFF_SIZE;
}
@@ -2394,11 +2400,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
if (num_trbs != 0)
- dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+ dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
"TRBs, %d left\n", __func__,
urb->ep->desc.bEndpointAddress, num_trbs);
if (running_total != urb->transfer_buffer_length)
- dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+ dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
"queued %#x (%d), asked for %#x (%d)\n",
__func__,
urb->ep->desc.bEndpointAddress,
@@ -2533,8 +2539,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
sg = urb->sg;
addr = (u64) sg_dma_address(sg);
this_sg_len = sg_dma_len(sg);
- trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
@@ -2572,7 +2577,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
(unsigned int) addr + trb_buff_len);
if (TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+ (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
@@ -2616,7 +2621,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (running_total + trb_buff_len > urb->transfer_buffer_length)
trb_buff_len =
@@ -2656,7 +2661,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
num_trbs = 0;
/* How much data is (potentially) left before the 64KB boundary? */
running_total = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
/* If there's some data on this 64KB chunk, or we have to send a
* zero-length transfer, we need at least one TRB
@@ -2700,8 +2706,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* How much data is in the first TRB? */
addr = (u64) urb->transfer_dma;
trb_buff_len = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
- if (urb->transfer_buffer_length < trb_buff_len)
+ (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+ if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
first_trb = true;
@@ -2879,8 +2885,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
td_len = urb->iso_frame_desc[i].length;
- running_total = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
if (running_total != 0)
num_trbs++;
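The xhci-ring.c hunks replace the shift-based remainder with a mask and then fold an exactly aligned address back to zero: without the added "running_total &= TRB_MAX_BUFF_SIZE - 1", an address already on a 64KB boundary reports a full 65536-byte remainder, so the "running_total != 0" test counts one TRB too many. A small standalone demonstration of the arithmetic (BUF_SIZE mirrors TRB_MAX_BUFF_SIZE; addresses are examples):

/*
 * Distance from addr to the next 64KB boundary, before and after the fix.
 */
#include <stdint.h>
#include <stdio.h>

#define BUF_SIZE (1u << 16)	/* 64KB, a power of two */

static uint32_t to_boundary_naive(uint64_t addr)
{
	return BUF_SIZE - (uint32_t)(addr & (BUF_SIZE - 1));
}

static uint32_t to_boundary_fixed(uint64_t addr)
{
	uint32_t rem = BUF_SIZE - (uint32_t)(addr & (BUF_SIZE - 1));

	return rem & (BUF_SIZE - 1);	/* aligned addresses map to 0 */
}

int main(void)
{
	uint64_t aligned = 0x20000, unaligned = 0x20010;

	printf("aligned:   naive=%u fixed=%u\n",
	       to_boundary_naive(aligned), to_boundary_fixed(aligned));
	printf("unaligned: naive=%u fixed=%u\n",
	       to_boundary_naive(unaligned), to_boundary_fixed(unaligned));
	return 0;
}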
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 34cf4e1..2083fc2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -109,7 +109,7 @@ int xhci_halt(struct xhci_hcd *xhci)
/*
* Set the run bit and wait for the host to be running.
*/
-int xhci_start(struct xhci_hcd *xhci)
+static int xhci_start(struct xhci_hcd *xhci)
{
u32 temp;
int ret;
@@ -329,7 +329,7 @@ int xhci_init(struct usb_hcd *hcd)
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-void xhci_event_ring_work(unsigned long arg)
+static void xhci_event_ring_work(unsigned long arg)
{
unsigned long flags;
int temp;
@@ -473,7 +473,7 @@ int xhci_run(struct usb_hcd *hcd)
xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
xhci_writel(xhci, ER_IRQ_ENABLE(temp),
&xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
if (NUM_TEST_NOOPS > 0)
doorbell = xhci_setup_one_noop(xhci);
@@ -528,7 +528,7 @@ void xhci_stop(struct usb_hcd *hcd)
temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
xhci_writel(xhci, ER_IRQ_DISABLE(temp),
&xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
@@ -755,7 +755,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
xhci_writel(xhci, ER_IRQ_DISABLE(temp),
&xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
@@ -857,7 +857,7 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
/* Returns 1 if the arguments are OK;
* returns 0 this is a root hub; returns -EINVAL for NULL pointers.
*/
-int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
const char *func) {
struct xhci_hcd *xhci;
@@ -1693,7 +1693,7 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
-void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
+static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state)
{
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7f236fd..7f127df 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1348,7 +1348,7 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
}
/* xHCI debugging */
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
void xhci_print_registers(struct xhci_hcd *xhci);
void xhci_dbg_regs(struct xhci_hcd *xhci);
void xhci_print_run_regs(struct xhci_hcd *xhci);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 54a8bd1..c292d5c 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1864,6 +1864,7 @@ allocate_instance(struct device *dev,
INIT_LIST_HEAD(&musb->out_bulk);
hcd->uses_new_polling = 1;
+ hcd->has_tt = 1;
musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index d74a811..e6400be 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -488,6 +488,15 @@ struct musb {
unsigned set_address:1;
unsigned test_mode:1;
unsigned softconnect:1;
+
+ u8 address;
+ u8 test_mode_nr;
+ u16 ackpend; /* ep0 */
+ enum musb_g_ep0_state ep0_state;
+ struct usb_gadget g; /* the gadget */
+ struct usb_gadget_driver *gadget_driver; /* its driver */
+#endif
+
/*
* FIXME: Remove this flag.
*
@@ -501,14 +510,6 @@ struct musb {
*/
unsigned double_buffer_not_ok:1 __deprecated;
- u8 address;
- u8 test_mode_nr;
- u16 ackpend; /* ep0 */
- enum musb_g_ep0_state ep0_state;
- struct usb_gadget g; /* the gadget */
- struct usb_gadget_driver *gadget_driver; /* its driver */
-#endif
-
struct musb_hdrc_config *config;
#ifdef MUSB_CONFIG_PROC_FS
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index a3f1233..bc8badd 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -362,6 +362,7 @@ static int omap2430_musb_init(struct musb *musb)
static int omap2430_musb_exit(struct musb *musb)
{
+ del_timer_sync(&musb_idle_timer);
omap2430_low_level_exit(musb);
otg_put_transceiver(musb->xceiv);
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 7481ff8..0457813 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -301,6 +301,9 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
+ { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
+ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ },
{ USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
{ }
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index b004b2a..9c014e2 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -295,12 +295,15 @@ static void usb_wwan_indat_callback(struct urb *urb)
__func__, status, endpoint);
} else {
tty = tty_port_tty_get(&port->port);
- if (urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
- } else
- dbg("%s: empty read urb received", __func__);
- tty_kref_put(tty);
+ if (tty) {
+ if (urb->actual_length) {
+ tty_insert_flip_string(tty, data,
+ urb->actual_length);
+ tty_flip_buffer_push(tty);
+ } else
+ dbg("%s: empty read urb received", __func__);
+ tty_kref_put(tty);
+ }
/* Resubmit urb so we continue receiving */
if (status != -ESHUTDOWN) {
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 15a5d89..1c11959 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -27,6 +27,7 @@
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
+#include <linux/usb/cdc.h>
#include "visor.h"
/*
@@ -479,6 +480,17 @@ static int visor_probe(struct usb_serial *serial,
dbg("%s", __func__);
+ /*
+ * some Samsung Android phones in modem mode have the same ID
+ * as SPH-I500, but they are ACM devices, so don't bind to them
+ */
+ if (id->idVendor == SAMSUNG_VENDOR_ID &&
+ id->idProduct == SAMSUNG_SPH_I500_ID &&
+ serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM &&
+ serial->dev->descriptor.bDeviceSubClass ==
+ USB_CDC_SUBCLASS_ACM)
+ return -ENODEV;
+
if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
serial->dev->actconfig->desc.bConfigurationValue);
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index 8010aae..dd0e84a 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -239,11 +239,15 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
lcd->spi = spi;
lcd->power = FB_BLANK_POWERDOWN;
lcd->buffer = kzalloc(8, GFP_KERNEL);
+ if (!lcd->buffer) {
+ ret = -ENOMEM;
+ goto out_free_lcd;
+ }
ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops);
if (IS_ERR(ld)) {
ret = PTR_ERR(ld);
- goto out_free_lcd;
+ goto out_free_buffer;
}
lcd->ld = ld;
@@ -257,6 +261,8 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
out_unregister:
lcd_device_unregister(ld);
+out_free_buffer:
+ kfree(lcd->buffer);
out_free_lcd:
kfree(lcd);
return ret;
@@ -268,6 +274,7 @@ static int __devexit ltv350qv_remove(struct spi_device *spi)
ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->ld);
+ kfree(lcd->buffer);
kfree(lcd);
return 0;
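The ltv350qv.c fix extends the usual goto-unwind error handling: each successful allocation gets a matching cleanup label, so a later failure releases everything acquired so far in reverse order. A compact user-space sketch of the pattern under illustrative names:

/*
 * Sketch of goto-unwind error handling: each allocation gains a matching
 * label so later failures release earlier resources in reverse order.
 */
#include <stdlib.h>

struct panel {
	void *buffer;
};

/* Stub for a step that can fail after both allocations succeeded. */
static int register_panel(struct panel *p)
{
	(void)p;
	return 0;
}

static int probe_panel(struct panel **out)
{
	struct panel *p;
	int ret;

	p = calloc(1, sizeof(*p));
	if (!p)
		return -1;

	p->buffer = calloc(8, 1);
	if (!p->buffer) {
		ret = -1;
		goto out_free_panel;
	}

	ret = register_panel(p);
	if (ret)
		goto out_free_buffer;

	*out = p;
	return 0;

out_free_buffer:
	free(p->buffer);
out_free_panel:
	free(p);
	return ret;
}

int main(void)
{
	struct panel *p = NULL;

	return probe_panel(&p) ? 1 : 0;
}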
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index db8c4c4..2417727 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -37,11 +37,19 @@ static enum shutdown_state shutting_down = SHUTDOWN_INVALID;
#ifdef CONFIG_PM_SLEEP
static int xen_hvm_suspend(void *data)
{
+ int err;
struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
int *cancelled = data;
BUG_ON(!irqs_disabled());
+ err = sysdev_suspend(PMSG_SUSPEND);
+ if (err) {
+ printk(KERN_ERR "xen_hvm_suspend: sysdev_suspend failed: %d\n",
+ err);
+ return err;
+ }
+
*cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
xen_hvm_post_suspend(*cancelled);
@@ -53,6 +61,8 @@ static int xen_hvm_suspend(void *data)
xen_timer_resume();
}
+ sysdev_resume();
+
return 0;
}