author     Scott Wood <scottwood@freescale.com>  2015-02-13 22:30:07 (GMT)
committer  Scott Wood <scottwood@freescale.com>  2015-02-13 22:30:07 (GMT)
commit     e5fc662387b40984ed4b7a14477cc659f4d80b94 (patch)
tree       cf60ec2e653100003e59216306e241a4485c7d4f /drivers/gpu
parent     fa2b857e85998a6843c1a6843446ff00067fa3a4 (diff)
parent     38bff34aef9a92ba18386e801604824856883b32 (diff)
download   linux-fsl-qoriq-e5fc662387b40984ed4b7a14477cc659f4d80b94.tar.xz
Merge branch 'rtmerge'
Signed-off-by: Scott Wood <scottwood@freescale.com>

Conflicts:
	arch/arm/kvm/mmu.c
	arch/arm/mm/proc-v7-3level.S
	arch/powerpc/kernel/vdso32/getcpu.S
	drivers/crypto/caam/error.c
	drivers/crypto/caam/sg_sw_sec4.h
	drivers/usb/host/ehci-fsl.c
Diffstat (limited to 'drivers/gpu')
 drivers/gpu/drm/ast/ast_main.c | 2
 drivers/gpu/drm/ast/ast_mode.c | 4
 drivers/gpu/drm/cirrus/cirrus_drv.c | 44
 drivers/gpu/drm/cirrus/cirrus_mode.c | 3
 drivers/gpu/drm/i915/i915_gem.c | 17
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 54
 drivers/gpu/drm/i915/i915_gem_gtt.c | 14
 drivers/gpu/drm/i915/i915_gem_stolen.c | 69
 drivers/gpu/drm/i915/i915_reg.h | 4
 drivers/gpu/drm/i915/intel_bios.c | 2
 drivers/gpu/drm/i915/intel_crt.c | 10
 drivers/gpu/drm/i915/intel_display.c | 54
 drivers/gpu/drm/i915/intel_drv.h | 3
 drivers/gpu/drm/i915/intel_hdmi.c | 9
 drivers/gpu/drm/i915/intel_lvds.c | 24
 drivers/gpu/drm/i915/intel_pm.c | 45
 drivers/gpu/drm/i915/intel_ringbuffer.c | 6
 drivers/gpu/drm/i915/intel_tv.c | 13
 drivers/gpu/drm/i915/intel_uncore.c | 2
 drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 2
 drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c | 1
 drivers/gpu/drm/nouveau/core/subdev/therm/fan.c | 19
 drivers/gpu/drm/nouveau/nouveau_acpi.c | 3
 drivers/gpu/drm/nouveau/nouveau_display.c | 2
 drivers/gpu/drm/nouveau/nouveau_vga.c | 9
 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 6
 drivers/gpu/drm/omapdrm/omap_gem.c | 10
 drivers/gpu/drm/omapdrm/omap_plane.c | 4
 drivers/gpu/drm/qxl/qxl_display.c | 16
 drivers/gpu/drm/qxl/qxl_irq.c | 3
 drivers/gpu/drm/qxl/qxl_ttm.c | 1
 drivers/gpu/drm/radeon/atombios_crtc.c | 48
 drivers/gpu/drm/radeon/atombios_dp.c | 21
 drivers/gpu/drm/radeon/atombios_encoders.c | 15
 drivers/gpu/drm/radeon/ci_dpm.c | 11
 drivers/gpu/drm/radeon/cik.c | 99
 drivers/gpu/drm/radeon/cik_sdma.c | 9
 drivers/gpu/drm/radeon/cikd.h | 11
 drivers/gpu/drm/radeon/cypress_dpm.c | 2
 drivers/gpu/drm/radeon/evergreen.c | 41
 drivers/gpu/drm/radeon/ni_dma.c | 6
 drivers/gpu/drm/radeon/ni_dpm.c | 2
 drivers/gpu/drm/radeon/r100.c | 3
 drivers/gpu/drm/radeon/r600.c | 14
 drivers/gpu/drm/radeon/r600_dma.c | 9
 drivers/gpu/drm/radeon/r600_dpm.c | 37
 drivers/gpu/drm/radeon/radeon.h | 6
 drivers/gpu/drm/radeon/radeon_atombios.c | 37
 drivers/gpu/drm/radeon/radeon_atpx_handler.c | 7
 drivers/gpu/drm/radeon/radeon_bios.c | 14
 drivers/gpu/drm/radeon/radeon_connectors.c | 2
 drivers/gpu/drm/radeon/radeon_cs.c | 21
 drivers/gpu/drm/radeon/radeon_display.c | 10
 drivers/gpu/drm/radeon/radeon_irq_kms.c | 10
 drivers/gpu/drm/radeon/radeon_kms.c | 7
 drivers/gpu/drm/radeon/radeon_object.c | 38
 drivers/gpu/drm/radeon/radeon_pm.c | 28
 drivers/gpu/drm/radeon/radeon_ttm.c | 2
 drivers/gpu/drm/radeon/radeon_ucode.h | 3
 drivers/gpu/drm/radeon/radeon_uvd.c | 4
 drivers/gpu/drm/radeon/rs600.c | 9
 drivers/gpu/drm/radeon/rs690.c | 3
 drivers/gpu/drm/radeon/rv515.c | 3
 drivers/gpu/drm/radeon/rv770_dpm.c | 6
 drivers/gpu/drm/radeon/si.c | 71
 drivers/gpu/drm/radeon/si_dpm.c | 41
 drivers/gpu/drm/radeon/trinity_dpm.c | 11
 drivers/gpu/drm/radeon/uvd_v1_0.c | 10
 drivers/gpu/drm/tilcdc/tilcdc_drv.c | 67
 drivers/gpu/drm/tilcdc/tilcdc_panel.c | 5
 drivers/gpu/drm/tilcdc/tilcdc_slave.c | 1
 drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | 1
 drivers/gpu/drm/ttm/ttm_page_alloc.c | 37
 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 43
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 22
 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 4
 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 17
 drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 3
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 16
 drivers/gpu/host1x/hw/intr_hw.c | 4
 drivers/gpu/vga/vga_switcheroo.c | 9
 81 files changed, 1007 insertions(+), 348 deletions(-)
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 7f6152d..d57a38d 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -100,7 +100,7 @@ static int ast_detect_chip(struct drm_device *dev)
}
ast->vga2_clone = false;
} else {
- ast->chip = 2000;
+ ast->chip = AST2000;
DRM_INFO("AST 2000 detected\n");
}
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 7fc9f72..e8f6418 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -1012,8 +1012,8 @@ static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
- data32.b[2] = srcdata32[0].b[1] | (srcdata32[1].b[0] >> 4);
- data32.b[3] = srcdata32[0].b[3] | (srcdata32[1].b[2] >> 4);
+ data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
+ data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
writel(data32.ul, dstxor);
csum += data32.ul;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 138364d..73fed35 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/console.h>
#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
#include "cirrus_drv.h"
@@ -31,6 +32,8 @@ static struct drm_driver driver;
static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
{ PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0,
0, 0 },
+ { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, PCI_VENDOR_ID_XEN,
+ 0x0001, 0, 0, 0 },
{0,}
};
@@ -75,6 +78,41 @@ static void cirrus_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
+static int cirrus_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct cirrus_device *cdev = drm_dev->dev_private;
+
+ drm_kms_helper_poll_disable(drm_dev);
+
+ if (cdev->mode_info.gfbdev) {
+ console_lock();
+ fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 1);
+ console_unlock();
+ }
+
+ return 0;
+}
+
+static int cirrus_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct cirrus_device *cdev = drm_dev->dev_private;
+
+ drm_helper_resume_force_mode(drm_dev);
+
+ if (cdev->mode_info.gfbdev) {
+ console_lock();
+ fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 0);
+ console_unlock();
+ }
+
+ drm_kms_helper_poll_enable(drm_dev);
+ return 0;
+}
+
static const struct file_operations cirrus_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -104,11 +142,17 @@ static struct drm_driver driver = {
.dumb_destroy = drm_gem_dumb_destroy,
};
+static const struct dev_pm_ops cirrus_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(cirrus_pm_suspend,
+ cirrus_pm_resume)
+};
+
static struct pci_driver cirrus_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = cirrus_pci_probe,
.remove = cirrus_pci_remove,
+ .driver.pm = &cirrus_pm_ops,
};
static int __init cirrus_init(void)
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 3592616..c6ec012 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -308,6 +308,9 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
WREG_HDR(hdr);
cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
+
+ /* Unblank (needed on S3 resume, vgabios doesn't do it then) */
+ outb(0x20, 0x3c0);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aaeac32..07a77d7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1386,10 +1386,13 @@ unlock:
out:
switch (ret) {
case -EIO:
- /* If this -EIO is due to a gpu hang, give the reset code a
- * chance to clean up the mess. Otherwise return the proper
- * SIGBUS. */
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ /*
+ * We eat errors when the gpu is terminally wedged to avoid
+ * userspace unduly crashing (gl has no provisions for mmaps to
+ * fail). But any other -EIO isn't ours (e.g. swap in failure)
+ * and so needs to be reported.
+ */
+ if (!i915_terminally_wedged(&dev_priv->gpu_error))
return VM_FAULT_SIGBUS;
case -EAGAIN:
/*
@@ -3419,7 +3422,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct i915_vma *vma;
+ struct i915_vma *vma, *next;
int ret;
if (obj->cache_level == cache_level)
@@ -3430,7 +3433,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return -EBUSY;
}
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
ret = i915_vma_unbind(vma);
if (ret)
@@ -4792,7 +4795,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
if (!mutex_is_locked(mutex))
return false;
-#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE)
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && !defined(CONFIG_PREEMPT_RT_BASE)
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 979a6ea..d5f6bee 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -674,9 +674,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
* relocations were valid.
*/
for (j = 0; j < exec[i].relocation_count; j++) {
- if (copy_to_user(&user_relocs[j].presumed_offset,
- &invalid_offset,
- sizeof(invalid_offset))) {
+ if (__copy_to_user(&user_relocs[j].presumed_offset,
+ &invalid_offset,
+ sizeof(invalid_offset))) {
ret = -EFAULT;
mutex_lock(&dev->struct_mutex);
goto err;
@@ -1213,18 +1213,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
&dev_priv->gtt.base);
if (!ret) {
+ struct drm_i915_gem_exec_object __user *user_exec_list =
+ to_user_ptr(args->buffers_ptr);
+
/* Copy the new buffer offsets back to the user's exec list. */
- for (i = 0; i < args->buffer_count; i++)
- exec_list[i].offset = exec2_list[i].offset;
- /* ... and back out to userspace */
- ret = copy_to_user(to_user_ptr(args->buffers_ptr),
- exec_list,
- sizeof(*exec_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_DEBUG("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
+ for (i = 0; i < args->buffer_count; i++) {
+ ret = __copy_to_user(&user_exec_list[i].offset,
+ &exec2_list[i].offset,
+ sizeof(user_exec_list[i].offset));
+ if (ret) {
+ ret = -EFAULT;
+ DRM_DEBUG("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ break;
+ }
}
}
@@ -1272,14 +1275,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
&dev_priv->gtt.base);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
- ret = copy_to_user(to_user_ptr(args->buffers_ptr),
- exec2_list,
- sizeof(*exec2_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_DEBUG("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
+ struct drm_i915_gem_exec_object2 *user_exec_list =
+ to_user_ptr(args->buffers_ptr);
+ int i;
+
+ for (i = 0; i < args->buffer_count; i++) {
+ ret = __copy_to_user(&user_exec_list[i].offset,
+ &exec2_list[i].offset,
+ sizeof(user_exec_list[i].offset));
+ if (ret) {
+ ret = -EFAULT;
+ DRM_DEBUG("failed to copy %d exec entries "
+ "back to user\n",
+ args->buffer_count);
+ break;
+ }
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c7fa2e4..4ac33d2 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -521,6 +521,16 @@ void i915_check_and_clear_faults(struct drm_device *dev)
POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
}
+static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_INFO(dev_priv->dev)->gen < 6) {
+ intel_gtt_chipset_flush();
+ } else {
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+ }
+}
+
void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -537,6 +547,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
dev_priv->gtt.base.start / PAGE_SIZE,
dev_priv->gtt.base.total / PAGE_SIZE,
true);
+
+ i915_ggtt_flush(dev_priv);
}
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
@@ -557,7 +569,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
- i915_gem_chipset_flush(dev);
+ i915_ggtt_flush(dev_priv);
}
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index c077df0..c052e1e 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -74,6 +74,50 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
if (base == 0)
return 0;
+ /* make sure we don't clobber the GTT if it's within stolen memory */
+ if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
+ struct {
+ u32 start, end;
+ } stolen[2] = {
+ { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+ { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+ };
+ u64 gtt_start, gtt_end;
+
+ gtt_start = I915_READ(PGTBL_CTL);
+ if (IS_GEN4(dev))
+ gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
+ (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
+ else
+ gtt_start &= PGTBL_ADDRESS_LO_MASK;
+ gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
+
+ if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
+ stolen[0].end = gtt_start;
+ if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
+ stolen[1].start = gtt_end;
+
+ /* pick the larger of the two chunks */
+ if (stolen[0].end - stolen[0].start >
+ stolen[1].end - stolen[1].start) {
+ base = stolen[0].start;
+ dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
+ } else {
+ base = stolen[1].start;
+ dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
+ }
+
+ if (stolen[0].start != stolen[1].start ||
+ stolen[0].end != stolen[1].end) {
+ DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
+ (unsigned long long) gtt_start,
+ (unsigned long long) gtt_end - 1);
+ DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
+ base, base + (u32) dev_priv->gtt.stolen_size - 1);
+ }
+ }
+
+
/* Verify that nothing else uses this physical address. Stolen
* memory should be reserved by the BIOS and hidden from the
* kernel. So if the region is already marked as busy, something
@@ -82,9 +126,26 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
- DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
- base, base + (uint32_t)dev_priv->gtt.stolen_size);
- base = 0;
+ /*
+ * One more attempt but this time requesting region from
+ * base + 1, as we have seen that this resolves the region
+ * conflict with the PCI Bus.
+ * This is a BIOS w/a: Some BIOS wrap stolen in the root
+ * PCI bus, but have an off-by-one error. Hence retry the
+ * reservation starting from 1 instead of 0.
+ */
+ r = devm_request_mem_region(dev->dev, base + 1,
+ dev_priv->gtt.stolen_size - 1,
+ "Graphics Stolen Memory");
+ /*
+ * GEN3 firmware likes to smash pci bridges into the stolen
+ * range. Apparently this works.
+ */
+ if (r == NULL && !IS_GEN3(dev)) {
+ DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+ base, base + (uint32_t)dev_priv->gtt.stolen_size);
+ base = 0;
+ }
}
return base;
@@ -202,7 +263,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
int bios_reserved = 0;
#ifdef CONFIG_INTEL_IOMMU
- if (intel_iommu_gfx_mapped) {
+ if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
DRM_INFO("DMAR active, disabling use of stolen memory\n");
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 375abe7..9d344da 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -309,6 +309,7 @@
#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
+#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
@@ -570,6 +571,9 @@
/*
* Instruction and interrupt control regs
*/
+#define PGTBL_CTL 0x02020
+#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
+#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
#define PGTBL_ER 0x02024
#define RENDER_RING_BASE 0x02000
#define BSD_RING_BASE 0x04000
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 53f2bed..16ca7f6 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -657,7 +657,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
}
-static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
{
DRM_DEBUG_KMS("Falling back to manually reading VBT from "
"VBIOS ROM for %s\n",
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 10d1de5..3c25af4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -743,7 +743,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
.destroy = intel_encoder_destroy,
};
-static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
+static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
{
DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
return 1;
@@ -758,6 +758,14 @@ static const struct dmi_system_id intel_no_crt[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
},
},
+ {
+ .callback = intel_no_crt_dmi_callback,
+ .ident = "DELL XPS 8700",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"),
+ },
+ },
{ }
};
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5aa836e..37a9d3c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3537,7 +3537,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
ironlake_fdi_disable(crtc);
ironlake_disable_pch_transcoder(dev_priv, pipe);
- intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
if (HAS_PCH_CPT(dev)) {
/* disable TRANS_DP_CTL */
@@ -3613,7 +3612,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
if (intel_crtc->config.has_pch_encoder) {
lpt_disable_pch_transcoder(dev_priv);
- intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
intel_ddi_fdi_disable(crtc);
}
@@ -8688,11 +8686,22 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(requested_mode.hdisplay);
PIPE_CONF_CHECK_I(requested_mode.vdisplay);
- PIPE_CONF_CHECK_I(gmch_pfit.control);
- /* pfit ratios are autocomputed by the hw on gen4+ */
- if (INTEL_INFO(dev)->gen < 4)
- PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
- PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+ /*
+ * FIXME: BIOS likes to set up a cloned config with lvds+external
+ * screen. Since we don't yet re-compute the pipe config when moving
+ * just the lvds port away to another pipe the sw tracking won't match.
+ *
+ * Proper atomic modesets with recomputed global state will fix this.
+ * Until then just don't check gmch state for inherited modes.
+ */
+ if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
+ PIPE_CONF_CHECK_I(gmch_pfit.control);
+ /* pfit ratios are autocomputed by the hw on gen4+ */
+ if (INTEL_INFO(dev)->gen < 4)
+ PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+ PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+ }
+
PIPE_CONF_CHECK_I(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
PIPE_CONF_CHECK_I(pch_pfit.pos);
@@ -10097,6 +10106,9 @@ static struct intel_quirk intel_quirks[] = {
/* Acer Aspire 4736Z */
{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+ /* Acer Aspire 5336 */
+ { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
+
/* Dell XPS13 HD Sandy Bridge */
{ 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
/* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
@@ -10228,15 +10240,6 @@ void intel_modeset_init(struct drm_device *dev)
intel_disable_fbc(dev);
}
-static void
-intel_connector_break_all_links(struct intel_connector *connector)
-{
- connector->base.dpms = DRM_MODE_DPMS_OFF;
- connector->base.encoder = NULL;
- connector->encoder->connectors_active = false;
- connector->encoder->base.crtc = NULL;
-}
-
static void intel_enable_pipe_a(struct drm_device *dev)
{
struct intel_connector *connector;
@@ -10318,8 +10321,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
if (connector->encoder->base.crtc != &crtc->base)
continue;
- intel_connector_break_all_links(connector);
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
}
+ /* multiple connectors may have the same encoder:
+ * handle them and break crtc link separately */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head)
+ if (connector->encoder->base.crtc == &crtc->base) {
+ connector->encoder->base.crtc = NULL;
+ connector->encoder->connectors_active = false;
+ }
WARN_ON(crtc->active);
crtc->base.enabled = false;
@@ -10390,6 +10402,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
drm_get_encoder_name(&encoder->base));
encoder->disable(encoder);
}
+ encoder->base.crtc = NULL;
+ encoder->connectors_active = false;
/* Inconsistent output/port/pipe state happens presumably due to
* a bug in one of the get_hw_state functions. Or someplace else
@@ -10400,8 +10414,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
base.head) {
if (connector->encoder != encoder)
continue;
-
- intel_connector_break_all_links(connector);
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
}
}
/* Enabled encoders without active connectors will be fixed in
@@ -10443,6 +10457,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
base.head) {
memset(&crtc->config, 0, sizeof(crtc->config));
+ crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
+
crtc->active = dev_priv->display.get_pipe_config(crtc,
&crtc->config);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 7f2b384..569c0c5 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -204,7 +204,8 @@ struct intel_crtc_config {
* tracked with quirk flags so that fastboot and state checker can act
* accordingly.
*/
-#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
+#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
+#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */
unsigned long quirks;
struct drm_display_mode requested_mode;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4d302f3d..f5c4366 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -830,11 +830,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
}
}
-static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
- if (!hdmi->has_hdmi_sink || IS_G4X(dev))
+ if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
return 165000;
else if (IS_HASWELL(dev))
return 300000;
@@ -845,7 +845,8 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
static int intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
+ if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
+ true))
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
return MODE_CLOCK_LOW;
@@ -863,7 +864,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
- int portclock_limit = hdmi_portclock_limit(intel_hdmi);
+ int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
int desired_bpp;
if (intel_hdmi->color_range_auto) {
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b8af94a..e5473da 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -523,7 +523,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
.destroy = intel_encoder_destroy,
};
-static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
+static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
{
DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
return 1;
@@ -934,6 +934,17 @@ void intel_lvds_init(struct drm_device *dev)
int pipe;
u8 pin;
+ /*
+ * Unlock registers and just leave them unlocked. Do this before
+ * checking quirk lists to avoid bogus WARNINGs.
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PCH_PP_CONTROL,
+ I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+ } else {
+ I915_WRITE(PP_CONTROL,
+ I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+ }
if (!intel_lvds_supported(dev))
return;
@@ -1113,17 +1124,6 @@ out:
DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
- /*
- * Unlock registers and just
- * leave them unlocked
- */
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_PP_CONTROL,
- I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
- } else {
- I915_WRITE(PP_CONTROL,
- I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
- }
lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
DRM_DEBUG_KMS("lid notifier registration failed\n");
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 26c2ea3..a7daa2a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1592,6 +1592,16 @@ static void i9xx_update_wm(struct drm_device *dev)
DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+ if (IS_I915GM(dev) && enabled) {
+ struct intel_framebuffer *fb;
+
+ fb = to_intel_framebuffer(enabled->fb);
+
+ /* self-refresh seems busted with untiled */
+ if (fb->obj->tiling_mode == I915_TILING_NONE)
+ enabled = NULL;
+ }
+
/*
* Overlay gets an aggressive default since video jitter is bad.
*/
@@ -5337,24 +5347,26 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
static struct i915_power_well *hsw_pwr;
/* Display audio driver power well request */
-void i915_request_power_well(void)
+int i915_request_power_well(void)
{
- if (WARN_ON(!hsw_pwr))
- return;
+ if (!hsw_pwr)
+ return -ENODEV;
spin_lock_irq(&hsw_pwr->lock);
if (!hsw_pwr->count++ &&
!hsw_pwr->i915_request)
__intel_set_power_well(hsw_pwr->device, true);
spin_unlock_irq(&hsw_pwr->lock);
+ return 0;
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
/* Display audio driver power well release */
-void i915_release_power_well(void)
+int i915_release_power_well(void)
{
- if (WARN_ON(!hsw_pwr))
- return;
+ if (!hsw_pwr)
+ return -ENODEV;
+
spin_lock_irq(&hsw_pwr->lock);
WARN_ON(!hsw_pwr->count);
@@ -5362,9 +5374,30 @@ void i915_release_power_well(void)
!hsw_pwr->i915_request)
__intel_set_power_well(hsw_pwr->device, false);
spin_unlock_irq(&hsw_pwr->lock);
+ return 0;
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
+/*
+ * Private interface for the audio driver to get CDCLK in kHz.
+ *
+ * Caller must request power well using i915_request_power_well() prior to
+ * making the call.
+ */
+int i915_get_cdclk_freq(void)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (!hsw_pwr)
+ return -ENODEV;
+
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_well);
+
+ return intel_ddi_get_cdclk_freq(dev_priv);
+}
+EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
+
int i915_init_power_well(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 7507fe0..776ed3f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -322,12 +322,15 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
/*
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
+
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
* invalidate bit set. */
@@ -423,6 +426,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
}
}
+ /* Enforce ordering by reading HEAD register back */
+ I915_READ_HEAD(ring);
+
/* Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index dd6f84b..6b58be1 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ /* Prevents vblank waits from timing out in intel_tv_detect_type() */
+ intel_wait_for_vblank(encoder->base.dev,
+ to_intel_crtc(encoder->base.crtc)->pipe);
+
I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
}
@@ -1530,9 +1534,14 @@ static int tv_is_present_in_vbt(struct drm_device *dev)
/*
* If the device type is not TV, continue.
*/
- if (p_child->device_type != DEVICE_TYPE_INT_TV &&
- p_child->device_type != DEVICE_TYPE_TV)
+ switch (p_child->device_type) {
+ case DEVICE_TYPE_INT_TV:
+ case DEVICE_TYPE_TV:
+ case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
+ break;
+ default:
continue;
+ }
/* Only when the addin_offset is non-zero, it is regarded
* as present.
*/
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index ee7d649..a9a0154 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -166,6 +166,8 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
_MASKED_BIT_DISABLE(0xffff));
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+ _MASKED_BIT_DISABLE(0xffff));
/* something from same cacheline, but !FORCEWAKE_VLV */
__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 52dd7a1..8f33655 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -678,7 +678,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
}
if (outp == 8)
- return false;
+ return conf;
data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
if (data == 0x0000)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index 2d9b9d7..f3edd28 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -124,6 +124,7 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
struct dcb_output *outp)
{
u16 dcb = dcb_outp(bios, idx, ver, len);
+ memset(outp, 0x00, sizeof(*outp));
if (dcb) {
if (*ver >= 0x20) {
u32 conn = nv_ro32(bios, dcb + 0x00);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
index 39f47b9..c14cb09 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -54,8 +54,10 @@ nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
/* check that we're not already at the target duty cycle */
duty = fan->get(therm);
- if (duty == target)
- goto done;
+ if (duty == target) {
+ spin_unlock_irqrestore(&fan->lock, flags);
+ return 0;
+ }
/* smooth out the fanspeed increase/decrease */
if (!immediate && duty >= 0) {
@@ -73,8 +75,15 @@ nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
nv_debug(therm, "FAN update: %d\n", duty);
ret = fan->set(therm, duty);
- if (ret)
- goto done;
+ if (ret) {
+ spin_unlock_irqrestore(&fan->lock, flags);
+ return ret;
+ }
+
+ /* fan speed updated, drop the fan lock before grabbing the
+ * alarm-scheduling lock and risking a deadlock
+ */
+ spin_unlock_irqrestore(&fan->lock, flags);
/* schedule next fan update, if not at target speed already */
if (list_empty(&fan->alarm.head) && target != duty) {
@@ -92,8 +101,6 @@ nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
ptimer->alarm(ptimer, delay * 1000 * 1000, &fan->alarm);
}
-done:
- spin_unlock_irqrestore(&fan->lock, flags);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 200e856..efdb689 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -419,9 +419,6 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
acpi_status status;
acpi_handle dhandle, rom_handle;
- if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
- return false;
-
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7848590..fb072e6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -586,9 +586,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
}
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
- mutex_unlock(&chan->cli->mutex);
if (ret)
goto fail_unreserve;
+ mutex_unlock(&chan->cli->mutex);
/* Update the crtc struct and cleanup */
crtc->fb = fb;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 81638d7..13790ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -98,7 +98,16 @@ void
nouveau_vga_fini(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
+ bool runtime = false;
+
+ if (nouveau_runtime_pm == 1)
+ runtime = true;
+ if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
+ runtime = true;
+
vga_switcheroo_unregister_client(dev->pdev);
+ if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
+ vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index acf6678..9501728 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -199,7 +199,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
struct page **pages, uint32_t npages, uint32_t roll)
{
- dma_addr_t pat_pa = 0;
+ dma_addr_t pat_pa = 0, data_pa = 0;
uint32_t *data;
struct pat *pat;
struct refill_engine *engine = txn->engine_handle;
@@ -223,7 +223,9 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
.lut_id = engine->tcm->lut_id,
};
- data = alloc_dma(txn, 4*i, &pat->data_pa);
+ data = alloc_dma(txn, 4*i, &data_pa);
+ /* FIXME: what if data_pa is more than 32-bit ? */
+ pat->data_pa = data_pa;
while (i--) {
int n = i + roll;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 533f6eb..6b01276 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -791,7 +791,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj,
omap_obj->paddr = tiler_ssptr(block);
omap_obj->block = block;
- DBG("got paddr: %08x", omap_obj->paddr);
+ DBG("got paddr: %pad", &omap_obj->paddr);
}
omap_obj->paddr_cnt++;
@@ -988,9 +988,9 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
off = drm_vma_node_start(&obj->vma_node);
- seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
+ seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
omap_obj->flags, obj->name, obj->refcount.refcount.counter,
- off, omap_obj->paddr, omap_obj->paddr_cnt,
+ off, &omap_obj->paddr, omap_obj->paddr_cnt,
omap_obj->vaddr, omap_obj->roll);
if (omap_obj->flags & OMAP_BO_TILED) {
@@ -1473,8 +1473,8 @@ void omap_gem_init(struct drm_device *dev)
entry->paddr = tiler_ssptr(block);
entry->block = block;
- DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
- entry->paddr,
+ DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
+ &entry->paddr,
usergart[i].stride_pfn << PAGE_SHIFT);
}
}
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 046d5e6..5b62e21 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -142,8 +142,8 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply)
DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
info->out_width, info->out_height,
info->screen_width);
- DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
- info->paddr, info->p_uv_addr);
+ DBG("%d,%d %pad %pad", info->pos_x, info->pos_y,
+ &info->paddr, &info->p_uv_addr);
/* TODO: */
ilace = false;
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 835caba..5f79e51 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -508,7 +508,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
struct qxl_framebuffer *qfb;
struct qxl_bo *bo, *old_bo = NULL;
struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
- uint32_t width, height, base_offset;
bool recreate_primary = false;
int ret;
int surf_id;
@@ -538,9 +537,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
if (qcrtc->index == 0)
recreate_primary = true;
- width = mode->hdisplay;
- height = mode->vdisplay;
- base_offset = 0;
+ if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
+ DRM_ERROR("Mode doesn't fit in vram size (vgamem)");
+ return -EINVAL;
+ }
ret = qxl_bo_reserve(bo, false);
if (ret != 0)
@@ -554,10 +554,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
if (recreate_primary) {
qxl_io_destroy_primary(qdev);
qxl_io_log(qdev,
- "recreate primary: %dx%d (was %dx%d,%d,%d)\n",
- width, height, bo->surf.width,
- bo->surf.height, bo->surf.stride, bo->surf.format);
- qxl_io_create_primary(qdev, base_offset, bo);
+ "recreate primary: %dx%d,%d,%d\n",
+ bo->surf.width, bo->surf.height,
+ bo->surf.stride, bo->surf.format);
+ qxl_io_create_primary(qdev, 0, bo);
bo->is_primary = true;
surf_id = 0;
} else {
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 21393dc..f4b6b89 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
pending = xchg(&qdev->ram_header->int_pending, 0);
+ if (!pending)
+ return IRQ_NONE;
+
atomic_inc(&qdev->irq_received);
if (pending & QXL_INTERRUPT_DISPLAY) {
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 037786d..ed90fbe 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -433,6 +433,7 @@ static int qxl_sync_obj_flush(void *sync_obj)
static void qxl_sync_obj_unref(void **sync_obj)
{
+ *sync_obj = NULL;
}
static void *qxl_sync_obj_ref(void *sync_obj)
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index dcb652a..ba8742a 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -859,14 +859,16 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
- switch (bpc) {
- case 8:
- default:
- args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
- break;
- case 10:
- args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
- break;
+ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+ switch (bpc) {
+ case 8:
+ default:
+ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
+ break;
+ case 10:
+ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
+ break;
+ }
}
args.v5.ucTransmitterID = encoder_id;
args.v5.ucEncoderMode = encoder_mode;
@@ -881,20 +883,22 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
- switch (bpc) {
- case 8:
- default:
- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
- break;
- case 10:
- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
- break;
- case 12:
- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
- break;
- case 16:
- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
- break;
+ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+ switch (bpc) {
+ case 8:
+ default:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
+ break;
+ case 10:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
+ break;
+ case 12:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
+ break;
+ case 16:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
+ break;
+ }
}
args.v6.ucTransmitterID = encoder_id;
args.v6.ucEncoderMode = encoder_mode;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 0088541..26059ec 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -384,6 +384,19 @@ static int dp_get_max_dp_pix_clock(int link_rate,
/***** radeon specific DP functions *****/
+static int radeon_dp_get_max_link_rate(struct drm_connector *connector,
+ u8 dpcd[DP_DPCD_SIZE])
+{
+ int max_link_rate;
+
+ if (radeon_connector_is_dp12_capable(connector))
+ max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
+ else
+ max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
+
+ return max_link_rate;
+}
+
/* First get the min lane# when low rate is used according to pixel clock
* (prefer low rate), second check max lane# supported by DP panel,
* if the max lane# < low rate lane# then use max lane# instead.
@@ -393,7 +406,7 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
int pix_clock)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
- int max_link_rate = drm_dp_max_link_rate(dpcd);
+ int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
int max_lane_num = drm_dp_max_lane_count(dpcd);
int lane_num;
int max_dp_pix_clock;
@@ -431,7 +444,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
return 540000;
}
- return drm_dp_max_link_rate(dpcd);
+ return radeon_dp_get_max_link_rate(connector, dpcd);
}
static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
@@ -561,6 +574,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct radeon_connector_atom_dig *dig_connector;
int dp_clock;
+ if ((mode->clock > 340000) &&
+ (!radeon_connector_is_dp12_capable(connector)))
+ return MODE_CLOCK_HIGH;
+
if (!radeon_connector->con_priv)
return MODE_CLOCK_HIGH;
dig_connector = radeon_connector->con_priv;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 7bb7074..6a96517 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
struct backlight_properties props;
struct radeon_backlight_privdata *pdata;
struct radeon_encoder_atom_dig *dig;
- u8 backlight_level;
char bl_name[16];
/* Mac laptops with multiple GPUs use the gmux driver for backlight
@@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
pdata->encoder = radeon_encoder;
- backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
-
dig = radeon_encoder->enc_priv;
dig->bl_dev = bd;
bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
+ /* Set a reasonable default here if the level is 0 otherwise
+ * fbdev will attempt to turn the backlight on after console
+ * unblanking and it will try and restore 0 which turns the backlight
+ * off again.
+ */
+ if (bd->props.brightness == 0)
+ bd->props.brightness = RADEON_MAX_BL_LEVEL;
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
@@ -1910,8 +1914,11 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
else
args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
- } else
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+ } else {
args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ }
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 51e947a..0a8fc92 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -820,6 +820,9 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif
+ rdev->pm.dpm.thermal.min_temp = low_temp;
+ rdev->pm.dpm.thermal.max_temp = high_temp;
+
return 0;
}
@@ -1130,7 +1133,7 @@ static int ci_stop_dpm(struct radeon_device *rdev)
tmp &= ~GLOBAL_PWRMGT_EN;
WREG32_SMC(GENERAL_PWRMGT, tmp);
- tmp = RREG32(SCLK_PWRMGT_CNTL);
+ tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
tmp &= ~DYNAMIC_PM_EN;
WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
@@ -4695,7 +4698,7 @@ void ci_dpm_disable(struct radeon_device *rdev)
ci_enable_spread_spectrum(rdev, false);
ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
ci_stop_dpm(rdev);
- ci_enable_ds_master_switch(rdev, true);
+ ci_enable_ds_master_switch(rdev, false);
ci_enable_ulv(rdev, false);
ci_clear_vc(rdev);
ci_reset_to_default(rdev);
@@ -5098,6 +5101,10 @@ int ci_dpm_init(struct radeon_device *rdev)
pi->mclk_dpm_key_disabled = 0;
pi->pcie_dpm_key_disabled = 0;
+ /* mclk dpm is unstable on some R7 260X cards */
+ if (rdev->pdev->device == 0x6658)
+ pi->mclk_dpm_key_disabled = 1;
+
pi->caps_sclk_ds = true;
pi->mclk_strobe_mode_threshold = 40000;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 25370ac..cdc7f40 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1092,7 +1092,7 @@ static const u32 spectre_golden_registers[] =
0x8a14, 0xf000003f, 0x00000007,
0x8b24, 0xffffffff, 0x00ffffff,
0x28350, 0x3f3f3fff, 0x00000082,
- 0x28355, 0x0000003f, 0x00000000,
+ 0x28354, 0x0000003f, 0x00000000,
0x3e78, 0x00000001, 0x00000002,
0x913c, 0xffff03df, 0x00000004,
0xc768, 0x00000008, 0x00000008,
@@ -2745,6 +2745,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x130B) ||
(rdev->pdev->device == 0x130E) ||
(rdev->pdev->device == 0x1315) ||
+ (rdev->pdev->device == 0x1318) ||
(rdev->pdev->device == 0x131B)) {
rdev->config.cik.max_cu_per_sh = 4;
rdev->config.cik.max_backends_per_se = 1;
@@ -3342,8 +3343,8 @@ static int cik_cp_gfx_start(struct radeon_device *rdev)
/* init the CE partitions. CE only used for gfx on CIK */
radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
- radeon_ring_write(ring, 0xc000);
- radeon_ring_write(ring, 0xc000);
+ radeon_ring_write(ring, 0x8000);
+ radeon_ring_write(ring, 0x8000);
/* setup clear context state */
radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
@@ -3763,7 +3764,7 @@ struct bonaire_mqd
*/
static int cik_cp_compute_resume(struct radeon_device *rdev)
{
- int r, i, idx;
+ int r, i, j, idx;
u32 tmp;
bool use_doorbell = true;
u64 hqd_gpu_addr;
@@ -3886,7 +3887,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
mqd->queue_state.cp_hqd_pq_wptr= 0;
if (RREG32(CP_HQD_ACTIVE) & 1) {
WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
- for (i = 0; i < rdev->usec_timeout; i++) {
+ for (j = 0; j < rdev->usec_timeout; j++) {
if (!(RREG32(CP_HQD_ACTIVE) & 1))
break;
udelay(1);
@@ -4768,12 +4769,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
struct radeon_ring *ring = &rdev->ring[ridx];
+ int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX);
if (vm == NULL)
return;
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
if (vm->id < 8) {
radeon_ring_write(ring,
@@ -4832,7 +4834,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, 1 << vm->id);
/* compute doesn't have PFP */
- if (ridx == RADEON_RING_TYPE_GFX_INDEX) {
+ if (usepfp) {
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
@@ -5165,6 +5167,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
}
orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+ data |= 0x00000001;
data &= 0xfffffffd;
if (orig != data)
WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
@@ -5196,7 +5199,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
}
} else {
orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
- data |= 0x00000002;
+ data |= 0x00000003;
if (orig != data)
WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
@@ -5956,6 +5959,19 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
+ /* pflip */
+ if (rdev->num_crtc >= 2) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 4) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
/* dac hotplug */
WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
@@ -6312,6 +6328,25 @@ int cik_irq_set(struct radeon_device *rdev)
WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
}
+ if (rdev->num_crtc >= 2) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ }
+ if (rdev->num_crtc >= 4) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ }
+
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -6348,6 +6383,29 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6);
+ rdev->irq.stat_regs.cik.d1grph_int = RREG32(GRPH_INT_STATUS +
+ EVERGREEN_CRTC0_REGISTER_OFFSET);
+ rdev->irq.stat_regs.cik.d2grph_int = RREG32(GRPH_INT_STATUS +
+ EVERGREEN_CRTC1_REGISTER_OFFSET);
+ if (rdev->num_crtc >= 4) {
+ rdev->irq.stat_regs.cik.d3grph_int = RREG32(GRPH_INT_STATUS +
+ EVERGREEN_CRTC2_REGISTER_OFFSET);
+ rdev->irq.stat_regs.cik.d4grph_int = RREG32(GRPH_INT_STATUS +
+ EVERGREEN_CRTC3_REGISTER_OFFSET);
+ }
+ if (rdev->num_crtc >= 6) {
+ rdev->irq.stat_regs.cik.d5grph_int = RREG32(GRPH_INT_STATUS +
+ EVERGREEN_CRTC4_REGISTER_OFFSET);
+ rdev->irq.stat_regs.cik.d6grph_int = RREG32(GRPH_INT_STATUS +
+ EVERGREEN_CRTC5_REGISTER_OFFSET);
+ }
+
+ if (rdev->irq.stat_regs.cik.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.cik.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)
WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)
@@ -6358,6 +6416,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
if (rdev->num_crtc >= 4) {
+ if (rdev->irq.stat_regs.cik.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.cik.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
@@ -6369,6 +6433,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
}
if (rdev->num_crtc >= 6) {
+ if (rdev->irq.stat_regs.cik.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.cik.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
@@ -6487,6 +6557,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -6720,6 +6791,15 @@ restart_ih:
break;
}
break;
+ case 8: /* D1 page flip */
+ case 10: /* D2 page flip */
+ case 12: /* D3 page flip */
+ case 14: /* D4 page flip */
+ case 16: /* D5 page flip */
+ case 18: /* D6 page flip */
+ DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+ break;
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
@@ -8026,6 +8106,9 @@ void dce8_bandwidth_update(struct radeon_device *rdev)
u32 num_heads = 0, lb_size;
int i;
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
for (i = 0; i < rdev->num_crtc; i++) {
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index d565f40..dc055d4 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -369,13 +369,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
{
int r;
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
- RREG32(SRBM_SOFT_RESET);
-
r = cik_sdma_load_microcode(rdev);
if (r)
return r;
@@ -512,7 +505,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
tmp = 0xCAFEDEAD;
writel(tmp, ptr);
- r = radeon_ring_lock(rdev, ring, 4);
+ r = radeon_ring_lock(rdev, ring, 5);
if (r) {
DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
return r;
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 203d2a0..9c8ef20 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -868,6 +868,15 @@
# define DC_HPD6_RX_INTERRUPT (1 << 18)
#define DISP_INTERRUPT_STATUS_CONTINUE6 0x6780
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS 0x6858
+# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
+# define GRPH_PFLIP_INT_CLEAR (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define GRPH_INT_CONTROL 0x685c
+# define GRPH_PFLIP_INT_MASK (1 << 0)
+# define GRPH_PFLIP_INT_TYPE (1 << 8)
+
#define DAC_AUTODETECT_INT_CONTROL 0x67c8
#define DC_HPD1_INT_STATUS 0x601c
@@ -1686,12 +1695,12 @@
#define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */
#define EOP_TCL1_ACTION_EN (1 << 16)
#define EOP_TC_ACTION_EN (1 << 17) /* L2 */
+#define EOP_TCL2_VOLATILE (1 << 24)
#define EOP_CACHE_POLICY(x) ((x) << 25)
/* 0 - LRU
* 1 - Stream
* 2 - Bypass
*/
-#define EOP_TCL2_VOLATILE (1 << 27)
#define DATA_SEL(x) ((x) << 29)
/* 0 - discard
* 1 - send low 32bit data
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 91bb470..7143783 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -1549,7 +1549,7 @@ int cypress_populate_smc_voltage_tables(struct radeon_device *rdev,
table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0;
table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] =
- cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+ cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
}
return 0;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index c429bb9..20b00a0 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -188,7 +188,7 @@ static const u32 evergreen_golden_registers[] =
0x8c1c, 0xffffffff, 0x00001010,
0x28350, 0xffffffff, 0x00000000,
0xa008, 0xffffffff, 0x00010000,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x9508, 0xffffffff, 0x00000002,
0x913c, 0x0000000f, 0x0000000a
};
@@ -475,7 +475,7 @@ static const u32 cedar_golden_registers[] =
0x8c1c, 0xffffffff, 0x00001010,
0x28350, 0xffffffff, 0x00000000,
0xa008, 0xffffffff, 0x00010000,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x9508, 0xffffffff, 0x00000002
};
@@ -634,7 +634,7 @@ static const u32 juniper_mgcg_init[] =
static const u32 supersumo_golden_registers[] =
{
0x5eb4, 0xffffffff, 0x00000002,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x7030, 0xffffffff, 0x00000011,
0x7c30, 0xffffffff, 0x00000011,
0x6104, 0x01000300, 0x00000000,
@@ -718,7 +718,7 @@ static const u32 sumo_golden_registers[] =
static const u32 wrestler_golden_registers[] =
{
0x5eb4, 0xffffffff, 0x00000002,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x7030, 0xffffffff, 0x00000011,
0x7c30, 0xffffffff, 0x00000011,
0x6104, 0x01000300, 0x00000000,
@@ -2312,6 +2312,9 @@ void evergreen_bandwidth_update(struct radeon_device *rdev)
u32 num_heads = 0, lb_size;
int i;
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
for (i = 0; i < rdev->num_crtc; i++) {
@@ -2520,6 +2523,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
@@ -4276,7 +4280,6 @@ int evergreen_irq_set(struct radeon_device *rdev)
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
- u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
u32 dma_cntl, dma_cntl1 = 0;
u32 thermal_int = 0;
@@ -4459,15 +4462,21 @@ int evergreen_irq_set(struct radeon_device *rdev)
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
}
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
if (rdev->num_crtc >= 4) {
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
}
if (rdev->num_crtc >= 6) {
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
}
WREG32(DC_HPD1_INT_CONTROL, hpd1);
@@ -4659,6 +4668,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -4856,6 +4866,15 @@ restart_ih:
break;
}
break;
+ case 8: /* D1 page flip */
+ case 10: /* D2 page flip */
+ case 12: /* D3 page flip */
+ case 14: /* D4 page flip */
+ case 16: /* D5 page flip */
+ case 18: /* D6 page flip */
+ DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+ break;
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index dd6e968..d0e4ab1 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -119,12 +119,6 @@ int cayman_dma_resume(struct radeon_device *rdev)
u32 reg_offset, wb_offset;
int i, r;
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
-
for (i = 0; i < 2; i++) {
if (i == 0) {
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index db0fa61..85f36e7 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -1319,7 +1319,7 @@ static void ni_populate_smc_voltage_tables(struct radeon_device *rdev,
table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0;
table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] =
- cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+ cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
}
}
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index d713330..f98dcbe 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3189,6 +3189,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
uint32_t pixel_bytes1 = 0;
uint32_t pixel_bytes2 = 0;
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
if (rdev->mode_info.crtcs[0]->base.enabled) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 5af2729..88eb936 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3371,7 +3371,6 @@ int r600_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
u32 hdmi0, hdmi1;
- u32 d1grph = 0, d2grph = 0;
u32 dma_cntl;
u32 thermal_int = 0;
@@ -3480,8 +3479,8 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(DMA_CNTL, dma_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
- WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
- WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
+ WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
+ WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
if (ASIC_IS_DCE3(rdev)) {
WREG32(DC_HPD1_INT_CONTROL, hpd1);
@@ -3658,6 +3657,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -3784,6 +3784,14 @@ restart_ih:
break;
}
break;
+ case 9: /* D1 pflip */
+ DRM_DEBUG("IH: D1 flip\n");
+ radeon_crtc_handle_flip(rdev, 0);
+ break;
+ case 11: /* D2 pflip */
+ DRM_DEBUG("IH: D2 flip\n");
+ radeon_crtc_handle_flip(rdev, 1);
+ break;
case 19: /* HPD/DAC hotplug */
switch (src_data) {
case 0:
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 3b31745..aad3c36 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -116,15 +116,6 @@ int r600_dma_resume(struct radeon_device *rdev)
u32 rb_bufsz;
int r;
- /* Reset dma */
- if (rdev->family >= CHIP_RV770)
- WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
- else
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
-
WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 5513d8f..cc4258a 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -158,16 +158,18 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
u32 line_time_us, vblank_lines;
u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- radeon_crtc = to_radeon_crtc(crtc);
- if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
- line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
- radeon_crtc->hw_mode.clock;
- vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
- radeon_crtc->hw_mode.crtc_vdisplay +
- (radeon_crtc->v_border * 2);
- vblank_time_us = vblank_lines * line_time_us;
- break;
+ if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+ line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
+ radeon_crtc->hw_mode.clock;
+ vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
+ radeon_crtc->hw_mode.crtc_vdisplay +
+ (radeon_crtc->v_border * 2);
+ vblank_time_us = vblank_lines * line_time_us;
+ break;
+ }
}
}
@@ -181,14 +183,15 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
struct radeon_crtc *radeon_crtc;
u32 vrefresh = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- radeon_crtc = to_radeon_crtc(crtc);
- if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
- vrefresh = radeon_crtc->hw_mode.vrefresh;
- break;
+ if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+ vrefresh = radeon_crtc->hw_mode.vrefresh;
+ break;
+ }
}
}
-
return vrefresh;
}
@@ -1190,7 +1193,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
- ppt->usMaximumPowerDeliveryLimit;
+ le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
pt = &ppt->power_tune_table;
} else {
ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
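
The guarded r600_dpm_get_vblank_time() above still computes vblank time as line time (in microseconds) times the number of vblank lines. A rough worked example of that integer math, with assumed 1080p60 timings (htotal 2200, 148.5 MHz pixel clock, 45 vblank lines, no borders):

    #include <stdio.h>

    int main(void)
    {
        unsigned int htotal = 2200;        /* assumed crtc_htotal for 1080p60 */
        unsigned int clock_khz = 148500;   /* assumed pixel clock in kHz */
        unsigned int vblank_lines = 45;    /* crtc_vblank_end - crtc_vdisplay */

        /* Same integer math as the driver: htotal * 1000 / clock(kHz) = us per line. */
        unsigned int line_time_us = (htotal * 1000) / clock_khz;
        unsigned int vblank_time_us = vblank_lines * line_time_us;

        printf("line time ~%u us, vblank ~%u us\n", line_time_us, vblank_time_us);
        return 0;
    }
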
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b11433f..5c903a8 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -715,6 +715,12 @@ struct cik_irq_stat_regs {
u32 disp_int_cont4;
u32 disp_int_cont5;
u32 disp_int_cont6;
+ u32 d1grph_int;
+ u32 d2grph_int;
+ u32 d3grph_int;
+ u32 d4grph_int;
+ u32 d5grph_int;
+ u32 d6grph_int;
};
union radeon_irq_stat_regs {
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index dfa6412..0f538a4 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -464,6 +464,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
+ /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
+ if ((dev->pdev->device == 0x9805) &&
+ (dev->pdev->subsystem_vendor == 0x1734) &&
+ (dev->pdev->subsystem_device == 0x11bd)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_VGA)
+ return false;
+ }
return true;
}
@@ -1963,7 +1970,7 @@ static const char *thermal_controller_names[] = {
"adm1032",
"adm1030",
"max6649",
- "lm64",
+ "lm63", /* lm64 */
"f75375",
"asc7xxx",
};
@@ -1974,7 +1981,7 @@ static const char *pp_lib_thermal_controller_names[] = {
"adm1032",
"adm1030",
"max6649",
- "lm64",
+ "lm63", /* lm64 */
"f75375",
"RV6xx",
"RV770",
@@ -2281,19 +2288,31 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
- } else if ((controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
- (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
- (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
- DRM_INFO("Special thermal controller config\n");
+ } else if (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
+ DRM_INFO("External GPIO thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
+ } else if (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
+ DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
+ } else if (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
+ DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
pp_lib_thermal_controller_names[controller->ucType],
controller->ucI2cAddress >> 1,
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
if (rdev->pm.i2c_bus) {
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index b8db0d7..7c6e3fd 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -525,6 +525,13 @@ static bool radeon_atpx_detect(void)
has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
}
+ /* some newer PX laptops mark the dGPU as a non-VGA display device */
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+ vga_count++;
+
+ has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
+ }
+
if (has_atpx && vga_count == 2) {
acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 061b227..b131520 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
}
}
+ if (!found) {
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+ dhandle = ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ continue;
+
+ status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+ if (!ACPI_FAILURE(status)) {
+ found = true;
+ break;
+ }
+ }
+ }
+
if (!found)
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 6456573..fe90b3e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1360,7 +1360,7 @@ bool radeon_connector_is_dp12_capable(struct drm_connector *connector)
struct radeon_device *rdev = dev->dev_private;
if (ASIC_IS_DCE5(rdev) &&
- (rdev->clock.dp_extclk >= 53900) &&
+ (rdev->clock.default_dispclk >= 53900) &&
radeon_connector_encoder_is_hbr2(connector)) {
return true;
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 80285e3..ed9a997 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -97,6 +97,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
uint32_t domain = r->write_domain ?
r->write_domain : r->read_domains;
+ if (domain & RADEON_GEM_DOMAIN_CPU) {
+ DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
+ "for command submission\n");
+ return -EINVAL;
+ }
+
p->relocs[i].lobj.domain = domain;
if (domain == RADEON_GEM_DOMAIN_VRAM)
domain |= RADEON_GEM_DOMAIN_GTT;
@@ -271,10 +277,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EINVAL;
/* we only support VM on some SI+ rings */
- if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) &&
- ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
- DRM_ERROR("Ring %d requires VM!\n", p->ring);
- return -EINVAL;
+ if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
+ if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
+ DRM_ERROR("Ring %d requires VM!\n", p->ring);
+ return -EINVAL;
+ }
+ } else {
+ if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
+ DRM_ERROR("VM not supported on ring %d!\n",
+ p->ring);
+ return -EINVAL;
+ }
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0d1aa05..9a19a04 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -281,6 +281,10 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
u32 update_pending;
int vpos, hpos;
+ /* can happen during initialization */
+ if (radeon_crtc == NULL)
+ return;
+
spin_lock_irqsave(&rdev->ddev->event_lock, flags);
work = radeon_crtc->unpin_work;
if (work == NULL ||
@@ -704,6 +708,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
struct radeon_device *rdev = dev->dev_private;
int ret = 0;
+ /* don't leak the edid if we already fetched it in detect() */
+ if (radeon_connector->edid)
+ goto got_edid;
+
/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
@@ -743,8 +751,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
}
if (radeon_connector->edid) {
+got_edid:
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
+ drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
return ret;
}
drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index cc9e848..a1a8430 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -196,6 +196,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_AGP)
return false;
+ /*
+ * Older chips have a HW limitation, they can only generate 40 bits
+ * of address for "64-bit" MSIs which breaks on some platforms, notably
+ * IBM POWER servers, so we limit them
+ */
+ if (rdev->family < CHIP_BONAIRE) {
+ dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n");
+ rdev->pdev->no_64bit_msi = 1;
+ }
+
/* force MSI on */
if (radeon_msi == 1)
return true;
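
The radeon_irq_kms.c hunk above caps pre-BONAIRE parts to 32-bit MSI because their "64-bit" MSI messages can only carry 40 address bits. A tiny illustration of why that matters on platforms that place the MSI target above 2^40 (the addresses below are made up; only the 40-bit limit comes from the hunk):

    #include <stdio.h>
    #include <stdint.h>

    static int fits_in_bits(uint64_t addr, unsigned int bits)
    {
        return bits >= 64 || addr <= ((1ULL << bits) - 1);
    }

    int main(void)
    {
        uint64_t low  = 0x00000000fee00000ULL;  /* typical x86 MSI target, < 4 GiB */
        uint64_t high = 0x0003fc0000000000ULL;  /* example doorbell above 2^40 */

        printf("low : 40-bit ok=%d, 32-bit ok=%d\n",
               fits_in_bits(low, 40), fits_in_bits(low, 32));
        printf("high: 40-bit ok=%d, 32-bit ok=%d\n",
               fits_in_bits(high, 40), fits_in_bits(high, 32));
        return 0;
    }
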
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 7456ce1..03ff672 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -501,8 +501,11 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
radeon_vm_init(rdev, &fpriv->vm);
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
- if (r)
+ if (r) {
+ radeon_vm_fini(rdev, &fpriv->vm);
+ kfree(fpriv);
return r;
+ }
/* map the ib pool buffer read only into
* virtual address space */
@@ -681,6 +684,8 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
/* Get associated drm_crtc: */
drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
+ if (!drmcrtc)
+ return -EINVAL;
/* Helper routine in DRM core does all the work: */
return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index c0fa4aa..315b8e2 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -586,22 +586,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 0);
rdev = rbo->rdev;
- if (bo->mem.mem_type == TTM_PL_VRAM) {
- size = bo->mem.num_pages << PAGE_SHIFT;
- offset = bo->mem.start << PAGE_SHIFT;
- if ((offset + size) > rdev->mc.visible_vram_size) {
- /* hurrah the memory is not visible ! */
- radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
- rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
- r = ttm_bo_validate(bo, &rbo->placement, false, false);
- if (unlikely(r != 0))
- return r;
- offset = bo->mem.start << PAGE_SHIFT;
- /* this should not happen */
- if ((offset + size) > rdev->mc.visible_vram_size)
- return -EINVAL;
- }
+ if (bo->mem.mem_type != TTM_PL_VRAM)
+ return 0;
+
+ size = bo->mem.num_pages << PAGE_SHIFT;
+ offset = bo->mem.start << PAGE_SHIFT;
+ if ((offset + size) <= rdev->mc.visible_vram_size)
+ return 0;
+
+ /* hurrah the memory is not visible ! */
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+ rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ r = ttm_bo_validate(bo, &rbo->placement, false, false);
+ if (unlikely(r == -ENOMEM)) {
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+ return ttm_bo_validate(bo, &rbo->placement, false, false);
+ } else if (unlikely(r != 0)) {
+ return r;
}
+
+ offset = bo->mem.start << PAGE_SHIFT;
+ /* this should never happen */
+ if ((offset + size) > rdev->mc.visible_vram_size)
+ return -EINVAL;
+
return 0;
}
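
The restructured radeon_bo_fault_reserve_notify() above reduces to three cases: the BO already lies inside the CPU-visible VRAM window, it can be revalidated into that window, or on -ENOMEM it is pushed out to GTT. The visibility test itself is plain page arithmetic; a sketch with assumed sizes (window size, BO size and placement are invented for the example):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12  /* assumed 4 KiB pages */

    int main(void)
    {
        uint64_t visible_vram = 256ULL << 20;  /* assumed 256 MiB CPU-visible window */
        uint64_t num_pages = 4096;             /* a 16 MiB buffer object */
        uint64_t start_page = 62000;           /* placement chosen by TTM */

        uint64_t size   = num_pages << PAGE_SHIFT;
        uint64_t offset = start_page << PAGE_SHIFT;

        if (offset + size <= visible_vram)
            printf("BO is CPU-visible, nothing to do\n");
        else
            printf("BO ends at %llu MiB, past the %llu MiB window: must move it\n",
                   (unsigned long long)((offset + size) >> 20),
                   (unsigned long long)(visible_vram >> 20));
        return 0;
    }
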
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index a0ec4bb..10fc977 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1362,12 +1362,14 @@ static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
rdev->pm.active_crtcs = 0;
rdev->pm.active_crtc_count = 0;
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
- radeon_crtc = to_radeon_crtc(crtc);
- if (radeon_crtc->enabled) {
- rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
- rdev->pm.active_crtc_count++;
+ if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc,
+ &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
+ rdev->pm.active_crtc_count++;
+ }
}
}
@@ -1431,12 +1433,14 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
/* update active crtc counts */
rdev->pm.dpm.new_active_crtcs = 0;
rdev->pm.dpm.new_active_crtc_count = 0;
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
- radeon_crtc = to_radeon_crtc(crtc);
- if (crtc->enabled) {
- rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
- rdev->pm.dpm.new_active_crtc_count++;
+ if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc,
+ &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (crtc->enabled) {
+ rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
+ rdev->pm.dpm.new_active_crtc_count++;
+ }
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 84323c9..02d3c38 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -189,7 +189,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
rbo = container_of(bo, struct radeon_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
+ if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
else
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
index 3385836..7e48c35 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.h
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -57,6 +57,9 @@
#define BTC_MC_UCODE_SIZE 6024
#define CAYMAN_MC_UCODE_SIZE 6037
#define SI_MC_UCODE_SIZE 7769
+#define TAHITI_MC_UCODE_SIZE 7808
+#define PITCAIRN_MC_UCODE_SIZE 7775
+#define VERDE_MC_UCODE_SIZE 7875
#define OLAND_MC_UCODE_SIZE 7863
#define CIK_MC_UCODE_SIZE 7866
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 8393647..a656b1a 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -464,6 +464,10 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
cmd = radeon_get_ib_value(p, p->idx) >> 1;
if (cmd < 0x4) {
+ if (end <= start) {
+ DRM_ERROR("invalid reloc offset %X!\n", offset);
+ return -EINVAL;
+ }
if ((end - start) < buf_sizes[cmd]) {
DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
(unsigned)(end - start), buf_sizes[cmd]);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 6acba80..bbe8459 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -582,8 +582,10 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
return -EINVAL;
}
addr = addr & 0xFFFFFFFFFFFFF000ULL;
- addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
- addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
+ if (addr != rdev->dummy_page.addr)
+ addr |= R600_PTE_VALID | R600_PTE_READABLE |
+ R600_PTE_WRITEABLE;
+ addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
writeq(addr, ptr + (i * 8));
return 0;
}
@@ -824,6 +826,9 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
/* FIXME: implement full support */
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
if (rdev->mode_info.crtcs[0]->base.enabled)
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 3c38f0a..d33b4ad 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -585,6 +585,9 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
if (rdev->mode_info.crtcs[0]->base.enabled)
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 873eb4b..9de81c5 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -1279,6 +1279,9 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
struct drm_display_mode *mode0 = NULL;
struct drm_display_mode *mode1 = NULL;
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
if (rdev->mode_info.crtcs[0]->base.enabled)
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index a239b30..890cf17 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2328,12 +2328,6 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_MEMORY_SS, 0);
- /* disable ss, causes hangs on some cayman boards */
- if (rdev->family == CHIP_CAYMAN) {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- }
-
if (pi->sclk_ss || pi->mclk_ss)
pi->dynamic_ss = true;
else
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 873e0a6..50482e7 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -39,30 +39,35 @@ MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
MODULE_FIRMWARE("radeon/TAHITI_me.bin");
MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
+MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
MODULE_FIRMWARE("radeon/VERDE_mc.bin");
+MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
MODULE_FIRMWARE("radeon/VERDE_smc.bin");
MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
MODULE_FIRMWARE("radeon/OLAND_me.bin");
MODULE_FIRMWARE("radeon/OLAND_ce.bin");
MODULE_FIRMWARE("radeon/OLAND_mc.bin");
+MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
MODULE_FIRMWARE("radeon/OLAND_smc.bin");
MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
MODULE_FIRMWARE("radeon/HAINAN_me.bin");
MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
+MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
@@ -1470,36 +1475,33 @@ static int si_mc_load_microcode(struct radeon_device *rdev)
const __be32 *fw_data;
u32 running, blackout = 0;
u32 *io_mc_regs;
- int i, ucode_size, regs_size;
+ int i, regs_size, ucode_size;
if (!rdev->mc_fw)
return -EINVAL;
+ ucode_size = rdev->mc_fw->size / 4;
+
switch (rdev->family) {
case CHIP_TAHITI:
io_mc_regs = (u32 *)&tahiti_io_mc_regs;
- ucode_size = SI_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_PITCAIRN:
io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
- ucode_size = SI_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_VERDE:
default:
io_mc_regs = (u32 *)&verde_io_mc_regs;
- ucode_size = SI_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_OLAND:
io_mc_regs = (u32 *)&oland_io_mc_regs;
- ucode_size = OLAND_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_HAINAN:
io_mc_regs = (u32 *)&hainan_io_mc_regs;
- ucode_size = OLAND_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
}
@@ -1555,7 +1557,7 @@ static int si_init_microcode(struct radeon_device *rdev)
const char *chip_name;
const char *rlc_chip_name;
size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
- size_t smc_req_size;
+ size_t smc_req_size, mc2_req_size;
char fw_name[30];
int err;
@@ -1570,6 +1572,7 @@ static int si_init_microcode(struct radeon_device *rdev)
ce_req_size = SI_CE_UCODE_SIZE * 4;
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = SI_MC_UCODE_SIZE * 4;
+ mc2_req_size = TAHITI_MC_UCODE_SIZE * 4;
smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4);
break;
case CHIP_PITCAIRN:
@@ -1580,6 +1583,7 @@ static int si_init_microcode(struct radeon_device *rdev)
ce_req_size = SI_CE_UCODE_SIZE * 4;
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = SI_MC_UCODE_SIZE * 4;
+ mc2_req_size = PITCAIRN_MC_UCODE_SIZE * 4;
smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4);
break;
case CHIP_VERDE:
@@ -1590,6 +1594,7 @@ static int si_init_microcode(struct radeon_device *rdev)
ce_req_size = SI_CE_UCODE_SIZE * 4;
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = SI_MC_UCODE_SIZE * 4;
+ mc2_req_size = VERDE_MC_UCODE_SIZE * 4;
smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4);
break;
case CHIP_OLAND:
@@ -1599,7 +1604,7 @@ static int si_init_microcode(struct radeon_device *rdev)
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
- mc_req_size = OLAND_MC_UCODE_SIZE * 4;
+ mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4);
break;
case CHIP_HAINAN:
@@ -1609,7 +1614,7 @@ static int si_init_microcode(struct radeon_device *rdev)
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
- mc_req_size = OLAND_MC_UCODE_SIZE * 4;
+ mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4);
break;
default: BUG();
@@ -1662,16 +1667,22 @@ static int si_init_microcode(struct radeon_device *rdev)
err = -EINVAL;
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->mc_fw->size != mc_req_size) {
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ }
+ if ((rdev->mc_fw->size != mc_req_size) &&
+ (rdev->mc_fw->size != mc2_req_size)) {
printk(KERN_ERR
"si_mc: Bogus length %zu in firmware \"%s\"\n",
rdev->mc_fw->size, fw_name);
err = -EINVAL;
}
+ DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
@@ -2219,6 +2230,9 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
u32 num_heads = 0, lb_size;
int i;
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
for (i = 0; i < rdev->num_crtc; i++) {
@@ -4746,7 +4760,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* write new base address */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
if (vm->id < 8) {
@@ -5720,7 +5734,6 @@ int si_irq_set(struct radeon_device *rdev)
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
- u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
u32 dma_cntl, dma_cntl1;
u32 thermal_int = 0;
@@ -5859,16 +5872,22 @@ int si_irq_set(struct radeon_device *rdev)
}
if (rdev->num_crtc >= 2) {
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
}
if (rdev->num_crtc >= 4) {
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
}
if (rdev->num_crtc >= 6) {
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
}
if (!ASIC_IS_NODCE(rdev)) {
@@ -6025,6 +6044,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -6232,6 +6252,15 @@ restart_ih:
break;
}
break;
+ case 8: /* D1 page flip */
+ case 10: /* D2 page flip */
+ case 12: /* D3 page flip */
+ case 14: /* D4 page flip */
+ case 16: /* D5 page flip */
+ case 18: /* D6 page flip */
+ DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+ break;
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
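
The si_init_microcode() change above requests the newer <chip>_mc2.bin first, quietly falls back to <chip>_mc.bin, and then accepts either blob length. The same try-then-fall-back shape, sketched against ordinary file I/O rather than request_firmware():

    #include <stdio.h>

    /* Illustrative only: stands in for the two request_firmware() calls above. */
    static FILE *open_first(const char *preferred, const char *fallback,
                            const char **picked)
    {
        FILE *f = fopen(preferred, "rb");
        *picked = preferred;
        if (!f) {
            f = fopen(fallback, "rb");
            *picked = fallback;
        }
        return f;
    }

    int main(void)
    {
        const char *picked;
        FILE *f = open_first("radeon/TAHITI_mc2.bin", "radeon/TAHITI_mc.bin", &picked);

        if (!f) {
            fprintf(stderr, "no MC microcode found\n");
            return 1;
        }
        printf("loaded %s\n", picked);
        fclose(f);
        return 0;
    }
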
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 83895f2..51588d3 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2901,6 +2901,22 @@ static int si_init_smc_spll_table(struct radeon_device *rdev)
return ret;
}
+struct si_dpm_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+ u32 max_sclk;
+ u32 max_mclk;
+};
+
+/* cards with dpm stability problems */
+static struct si_dpm_quirk si_dpm_quirk_list[] = {
+ /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+ { 0, 0, 0, 0 },
+};
+
static void si_apply_state_adjust_rules(struct radeon_device *rdev,
struct radeon_ps *rps)
{
@@ -2911,7 +2927,22 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
u32 mclk, sclk;
u16 vddc, vddci;
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
+ u32 max_sclk = 0, max_mclk = 0;
int i;
+ struct si_dpm_quirk *p = si_dpm_quirk_list;
+
+ /* Apply dpm quirks */
+ while (p && p->chip_device != 0) {
+ if (rdev->pdev->vendor == p->chip_vendor &&
+ rdev->pdev->device == p->chip_device &&
+ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+ rdev->pdev->subsystem_device == p->subsys_device) {
+ max_sclk = p->max_sclk;
+ max_mclk = p->max_mclk;
+ break;
+ }
+ ++p;
+ }
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
ni_dpm_vblank_too_short(rdev))
@@ -2965,6 +2996,14 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
if (ps->performance_levels[i].mclk > max_mclk_vddc)
ps->performance_levels[i].mclk = max_mclk_vddc;
}
+ if (max_mclk) {
+ if (ps->performance_levels[i].mclk > max_mclk)
+ ps->performance_levels[i].mclk = max_mclk;
+ }
+ if (max_sclk) {
+ if (ps->performance_levels[i].sclk > max_sclk)
+ ps->performance_levels[i].sclk = max_sclk;
+ }
}
/* XXX validate the min clocks required for display */
@@ -6220,7 +6259,7 @@ static void si_parse_pplib_clock_info(struct radeon_device *rdev,
if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
index == 0) {
/* XXX disable for A0 tahiti */
- si_pi->ulv.supported = true;
+ si_pi->ulv.supported = false;
si_pi->ulv.pl = *pl;
si_pi->ulv.one_pcie_lane_in_ulv = false;
si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
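
The dpm quirk list added above is a zero-terminated table keyed on PCI vendor/device and subsystem IDs; the first match supplies the sclk/mclk caps, with 0 meaning "no cap". A standalone sketch of that walk using the one entry from the hunk (0x1002 is the ATI/AMD PCI vendor ID):

    #include <stdio.h>
    #include <stdint.h>

    struct si_dpm_quirk {
        uint32_t chip_vendor, chip_device, subsys_vendor, subsys_device;
        uint32_t max_sclk, max_mclk;  /* 0 means no cap */
    };

    /* Same shape as the table in the hunk. */
    static const struct si_dpm_quirk quirks[] = {
        { 0x1002, 0x6810, 0x1462, 0x3036, 0, 120000 },
        { 0 },
    };

    int main(void)
    {
        /* Pretend this is the installed board. */
        uint32_t vendor = 0x1002, device = 0x6810, sv = 0x1462, sd = 0x3036;
        const struct si_dpm_quirk *p;

        for (p = quirks; p->chip_device; p++) {
            if (vendor == p->chip_vendor && device == p->chip_device &&
                sv == p->subsys_vendor && sd == p->subsys_device) {
                printf("quirk hit: max_sclk=%u max_mclk=%u\n",
                       (unsigned)p->max_sclk, (unsigned)p->max_mclk);
                return 0;
            }
        }
        printf("no quirk for this board\n");
        return 0;
    }
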
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index d700698..bf980ea 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1868,7 +1868,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT;
- pi->enable_bapm = false;
+ /* There are stability issues reported on with
+ * bapm enabled when switching between AC and battery
+ * power. At the same time, some MSI boards hang
+ * if it's not enabled and dpm is enabled. Just enable
+ * it for MSI boards right now.
+ */
+ if (rdev->pdev->subsystem_vendor == 0x1462)
+ pi->enable_bapm = true;
+ else
+ pi->enable_bapm = false;
pi->enable_nbps_policy = true;
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 7266805..f680f5f 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -83,7 +83,10 @@ int uvd_v1_0_init(struct radeon_device *rdev)
int r;
/* raise clocks while booting up the VCPU */
- radeon_set_uvd_clocks(rdev, 53300, 40000);
+ if (rdev->family < CHIP_RV740)
+ radeon_set_uvd_clocks(rdev, 10000, 10000);
+ else
+ radeon_set_uvd_clocks(rdev, 53300, 40000);
r = uvd_v1_0_start(rdev);
if (r)
@@ -405,7 +408,10 @@ int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
struct radeon_fence *fence = NULL;
int r;
- r = radeon_set_uvd_clocks(rdev, 53300, 40000);
+ if (rdev->family < CHIP_RV740)
+ r = radeon_set_uvd_clocks(rdev, 10000, 10000);
+ else
+ r = radeon_set_uvd_clocks(rdev, 53300, 40000);
if (r) {
DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
return r;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 116da19..2b25d65 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -84,6 +84,7 @@ static int modeset_init(struct drm_device *dev)
if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
/* oh nos! */
dev_err(dev->dev, "no encoders/connectors found\n");
+ drm_mode_config_cleanup(dev);
return -ENXIO;
}
@@ -122,6 +123,7 @@ static int tilcdc_unload(struct drm_device *dev)
struct tilcdc_drm_private *priv = dev->dev_private;
struct tilcdc_module *mod, *cur;
+ drm_fbdev_cma_fini(priv->fbdev);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
drm_vblank_cleanup(dev);
@@ -177,33 +179,37 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = priv;
priv->wq = alloc_ordered_workqueue("tilcdc", 0);
+ if (!priv->wq) {
+ ret = -ENOMEM;
+ goto fail_free_priv;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev->dev, "failed to get memory resource\n");
ret = -EINVAL;
- goto fail;
+ goto fail_free_wq;
}
priv->mmio = ioremap_nocache(res->start, resource_size(res));
if (!priv->mmio) {
dev_err(dev->dev, "failed to ioremap\n");
ret = -ENOMEM;
- goto fail;
+ goto fail_free_wq;
}
priv->clk = clk_get(dev->dev, "fck");
if (IS_ERR(priv->clk)) {
dev_err(dev->dev, "failed to get functional clock\n");
ret = -ENODEV;
- goto fail;
+ goto fail_iounmap;
}
priv->disp_clk = clk_get(dev->dev, "dpll_disp_ck");
if (IS_ERR(priv->clk)) {
dev_err(dev->dev, "failed to get display clock\n");
ret = -ENODEV;
- goto fail;
+ goto fail_put_clk;
}
#ifdef CONFIG_CPU_FREQ
@@ -213,7 +219,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
CPUFREQ_TRANSITION_NOTIFIER);
if (ret) {
dev_err(dev->dev, "failed to register cpufreq notifier\n");
- goto fail;
+ goto fail_put_disp_clk;
}
#endif
@@ -258,13 +264,13 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
ret = modeset_init(dev);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize mode setting\n");
- goto fail;
+ goto fail_cpufreq_unregister;
}
ret = drm_vblank_init(dev, 1);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize vblank\n");
- goto fail;
+ goto fail_mode_config_cleanup;
}
pm_runtime_get_sync(dev->dev);
@@ -272,7 +278,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
pm_runtime_put_sync(dev->dev);
if (ret < 0) {
dev_err(dev->dev, "failed to install IRQ handler\n");
- goto fail;
+ goto fail_vblank_cleanup;
}
platform_set_drvdata(pdev, dev);
@@ -288,13 +294,48 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
priv->fbdev = drm_fbdev_cma_init(dev, bpp,
dev->mode_config.num_crtc,
dev->mode_config.num_connector);
+ if (IS_ERR(priv->fbdev)) {
+ ret = PTR_ERR(priv->fbdev);
+ goto fail_irq_uninstall;
+ }
drm_kms_helper_poll_init(dev);
return 0;
-fail:
- tilcdc_unload(dev);
+fail_irq_uninstall:
+ pm_runtime_get_sync(dev->dev);
+ drm_irq_uninstall(dev);
+ pm_runtime_put_sync(dev->dev);
+
+fail_vblank_cleanup:
+ drm_vblank_cleanup(dev);
+
+fail_mode_config_cleanup:
+ drm_mode_config_cleanup(dev);
+
+fail_cpufreq_unregister:
+ pm_runtime_disable(dev->dev);
+#ifdef CONFIG_CPU_FREQ
+ cpufreq_unregister_notifier(&priv->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+fail_put_disp_clk:
+ clk_put(priv->disp_clk);
+#endif
+
+fail_put_clk:
+ clk_put(priv->clk);
+
+fail_iounmap:
+ iounmap(priv->mmio);
+
+fail_free_wq:
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+
+fail_free_priv:
+ dev->dev_private = NULL;
+ kfree(priv);
return ret;
}
@@ -628,10 +669,10 @@ static int __init tilcdc_drm_init(void)
static void __exit tilcdc_drm_fini(void)
{
DBG("fini");
- tilcdc_tfp410_fini();
- tilcdc_slave_fini();
- tilcdc_panel_fini();
platform_driver_unregister(&tilcdc_platform_driver);
+ tilcdc_panel_fini();
+ tilcdc_slave_fini();
+ tilcdc_tfp410_fini();
}
late_initcall(tilcdc_drm_init);
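
The tilcdc_load() rework above replaces the single catch-all "fail: tilcdc_unload(dev)" with labelled exits, so each error path only unwinds what was already acquired, in reverse order of acquisition. The general shape of that pattern, sketched with stand-in resources (the names and resources here are illustrative, not tilcdc's):

    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal stand-ins; the real code acquires a workqueue, MMIO mapping,
     * clocks, a cpufreq notifier, mode config, vblank and the IRQ in order. */
    static void *get_a(void) { return malloc(1); }
    static void *get_b(void) { return malloc(1); }
    static int   get_c(void) { return 0; }        /* pretend this can fail */

    static int example_load(void)
    {
        void *a = NULL, *b = NULL;
        int ret;

        a = get_a();
        if (!a) { ret = -1; goto fail; }

        b = get_b();
        if (!b) { ret = -1; goto fail_free_a; }

        ret = get_c();
        if (ret) goto fail_free_b;

        return 0;

    fail_free_b:
        free(b);          /* undo step 2 */
    fail_free_a:
        free(a);          /* undo step 1 */
    fail:
        return ret;
    }

    int main(void)
    {
        printf("load: %d\n", example_load());
        return 0;
    }
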
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 86c6732..b085dcc 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -151,6 +151,7 @@ struct panel_connector {
static void panel_connector_destroy(struct drm_connector *connector)
{
struct panel_connector *panel_connector = to_panel_connector(connector);
+ drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(panel_connector);
}
@@ -285,10 +286,8 @@ static void panel_destroy(struct tilcdc_module *mod)
{
struct panel_module *panel_mod = to_panel_module(mod);
- if (panel_mod->timings) {
+ if (panel_mod->timings)
display_timings_release(panel_mod->timings);
- kfree(panel_mod->timings);
- }
tilcdc_module_cleanup(mod);
kfree(panel_mod->info);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
index 595068b..2f83ffb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -166,6 +166,7 @@ struct slave_connector {
static void slave_connector_destroy(struct drm_connector *connector)
{
struct slave_connector *slave_connector = to_slave_connector(connector);
+ drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(slave_connector);
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index c38b56b..ce75ac8 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -167,6 +167,7 @@ struct tfp410_connector {
static void tfp410_connector_destroy(struct drm_connector *connector)
{
struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+ drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(tfp410_connector);
}
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 863bef9..76329d2 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -297,9 +297,12 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
*
* @pool: to free the pages from
* @free_all: If set to true will free all pages in pool
+ * @use_static: Safe to use static buffer
**/
-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
+ bool use_static)
{
+ static struct page *static_buf[NUM_PAGES_TO_ALLOC];
unsigned long irq_flags;
struct page *p;
struct page **pages_to_free;
@@ -309,8 +312,11 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
if (NUM_PAGES_TO_ALLOC < nr_free)
npages_to_free = NUM_PAGES_TO_ALLOC;
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
- GFP_KERNEL);
+ if (use_static)
+ pages_to_free = static_buf;
+ else
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+ GFP_KERNEL);
if (!pages_to_free) {
pr_err("Failed to allocate memory for pool free operation\n");
return 0;
@@ -373,7 +379,8 @@ restart:
if (freed_pages)
ttm_pages_put(pages_to_free, freed_pages);
out:
- kfree(pages_to_free);
+ if (pages_to_free != static_buf)
+ kfree(pages_to_free);
return nr_free;
}
@@ -382,32 +389,33 @@ out:
*
* XXX: (dchinner) Deadlock warning!
*
- * ttm_page_pool_free() does memory allocation using GFP_KERNEL. that means
- * this can deadlock when called a sc->gfp_mask that is not equal to
- * GFP_KERNEL.
- *
* This code is crying out for a shrinker per pool....
*/
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- static atomic_t start_pool = ATOMIC_INIT(0);
+ static DEFINE_MUTEX(lock);
+ static unsigned start_pool;
unsigned i;
- unsigned pool_offset = atomic_add_return(1, &start_pool);
+ unsigned pool_offset;
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
unsigned long freed = 0;
- pool_offset = pool_offset % NUM_POOLS;
+ if (!mutex_trylock(&lock))
+ return SHRINK_STOP;
+ pool_offset = ++start_pool % NUM_POOLS;
/* select start pool in round robin fashion */
for (i = 0; i < NUM_POOLS; ++i) {
unsigned nr_free = shrink_pages;
if (shrink_pages == 0)
break;
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
- shrink_pages = ttm_page_pool_free(pool, nr_free);
+ /* OK to use static buffer since global mutex is held. */
+ shrink_pages = ttm_page_pool_free(pool, nr_free, true);
freed += nr_free - shrink_pages;
}
+ mutex_unlock(&lock);
return freed;
}
@@ -706,7 +714,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
if (npages)
- ttm_page_pool_free(pool, npages);
+ ttm_page_pool_free(pool, npages, false);
}
/*
@@ -845,8 +853,9 @@ void ttm_page_alloc_fini(void)
pr_info("Finalizing pool allocator\n");
ttm_pool_mm_shrink_fini(_manager);
+ /* OK to use static buffer since global mutex is no longer used. */
for (i = 0; i < NUM_POOLS; ++i)
- ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
+ ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
kobject_put(&_manager->kobj);
_manager = NULL;
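
The ttm_page_alloc.c changes above stop doing GFP_KERNEL allocations from the shrinker: callers that already hold a global mutex pass use_static=true and reuse one static page-pointer array, and shrink_scan now takes its lock with mutex_trylock() and returns SHRINK_STOP when contended. A small userspace sketch of the static-scratch-buffer-behind-a-trylock idea (pthreads standing in for the kernel mutex):

    #include <stdio.h>
    #include <pthread.h>

    #define NBUF 64

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int static_buf[NBUF];  /* safe to reuse only while 'lock' is held */

    static int shrink_scan(void)
    {
        int freed = 0, i;

        /* Like mutex_trylock() in the hunk: never sleep in reclaim context. */
        if (pthread_mutex_trylock(&lock) != 0)
            return -1;  /* caller treats this like SHRINK_STOP */

        for (i = 0; i < NBUF; i++) {
            static_buf[i] = i;  /* stage work in the shared scratch buffer */
            freed++;
        }

        pthread_mutex_unlock(&lock);
        return freed;
    }

    int main(void)
    {
        printf("freed %d (or -1 if contended)\n", shrink_scan());
        return 0;
    }
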
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 7957bee..9082ca0 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -410,9 +410,12 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
*
* @pool: to free the pages from
* @nr_free: If set to true will free all pages in pool
+ * @use_static: Safe to use static buffer
**/
-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
+ bool use_static)
{
+ static struct page *static_buf[NUM_PAGES_TO_ALLOC];
unsigned long irq_flags;
struct dma_page *dma_p, *tmp;
struct page **pages_to_free;
@@ -429,8 +432,11 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
npages_to_free, nr_free);
}
#endif
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
- GFP_KERNEL);
+ if (use_static)
+ pages_to_free = static_buf;
+ else
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+ GFP_KERNEL);
if (!pages_to_free) {
pr_err("%s: Failed to allocate memory for pool free operation\n",
@@ -500,7 +506,8 @@ restart:
if (freed_pages)
ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
- kfree(pages_to_free);
+ if (pages_to_free != static_buf)
+ kfree(pages_to_free);
return nr_free;
}
@@ -529,7 +536,8 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
if (pool->type != type)
continue;
/* Takes a spinlock.. */
- ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
+ /* OK to use static buffer since global mutex is held. */
+ ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
/* This code path is called after _all_ references to the
* struct device has been dropped - so nobody should be
@@ -982,7 +990,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
/* shrink pool if necessary (only on !is_cached pools)*/
if (npages)
- ttm_dma_page_pool_free(pool, npages);
+ ttm_dma_page_pool_free(pool, npages, false);
ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
@@ -992,20 +1000,15 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
*
* XXX: (dchinner) Deadlock warning!
*
- * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
- * needs to be paid to sc->gfp_mask to determine if this can be done or not.
- * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context woul dbe really
- * bad.
- *
* I'm getting sadder as I hear more pathetical whimpers about needing per-pool
* shrinkers
*/
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- static atomic_t start_pool = ATOMIC_INIT(0);
+ static unsigned start_pool;
unsigned idx = 0;
- unsigned pool_offset = atomic_add_return(1, &start_pool);
+ unsigned pool_offset;
unsigned shrink_pages = sc->nr_to_scan;
struct device_pools *p;
unsigned long freed = 0;
@@ -1013,8 +1016,11 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
if (list_empty(&_manager->pools))
return SHRINK_STOP;
- mutex_lock(&_manager->lock);
- pool_offset = pool_offset % _manager->npools;
+ if (!mutex_trylock(&_manager->lock))
+ return SHRINK_STOP;
+ if (!_manager->npools)
+ goto out;
+ pool_offset = ++start_pool % _manager->npools;
list_for_each_entry(p, &_manager->pools, pools) {
unsigned nr_free;
@@ -1026,13 +1032,15 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
if (++idx < pool_offset)
continue;
nr_free = shrink_pages;
- shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+ /* OK to use static buffer since global mutex is held. */
+ shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
freed += nr_free - shrink_pages;
pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
p->pool->dev_name, p->pool->name, current->pid,
nr_free, shrink_pages);
}
+out:
mutex_unlock(&_manager->lock);
return freed;
}
@@ -1043,7 +1051,8 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
struct device_pools *p;
unsigned long count = 0;
- mutex_lock(&_manager->lock);
+ if (!mutex_trylock(&_manager->lock))
+ return 0;
list_for_each_entry(p, &_manager->pools, pools)
count += p->pool->npages_free;
mutex_unlock(&_manager->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 8b059eb..1b9aa98 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -834,14 +834,36 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int ret;
+ SVGA3dCmdSurfaceDMASuffix *suffix;
+ uint32_t bo_size;
cmd = container_of(header, struct vmw_dma_cmd, header);
+ suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
+ header->size - sizeof(*suffix));
+
+ /* Make sure device and verifier stays in sync. */
+ if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
+ DRM_ERROR("Invalid DMA suffix size.\n");
+ return -EINVAL;
+ }
+
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->dma.guest.ptr,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
+ /* Make sure DMA doesn't cross BO boundaries. */
+ bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+ if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
+ DRM_ERROR("Invalid DMA offset.\n");
+ return -EINVAL;
+ }
+
+ bo_size -= cmd->dma.guest.ptr.offset;
+ if (unlikely(suffix->maximumOffset > bo_size))
+ suffix->maximumOffset = bo_size;
+
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, &cmd->dma.host.sid,
NULL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index ed5ce2a..1b0f34b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -147,7 +147,7 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
}
if (!vmw_kms_validate_mode_vram(vmw_priv,
- info->fix.line_length,
+ var->xres * var->bits_per_pixel/8,
var->yoffset + var->yres)) {
DRM_ERROR("Requested geom can not fit in framebuffer\n");
return -EINVAL;
@@ -162,6 +162,8 @@ static int vmw_fb_set_par(struct fb_info *info)
struct vmw_private *vmw_priv = par->vmw_priv;
int ret;
+ info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;
+
ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
info->fix.line_length,
par->bpp, par->depth);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index c62d20e..ee742f1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1049,6 +1049,8 @@ int vmw_event_fence_action_create(struct drm_file *file_priv,
if (ret != 0)
goto out_no_queue;
+ return 0;
+
out_no_queue:
event->base.destroy(&event->base);
out_no_event:
@@ -1123,17 +1125,10 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
BUG_ON(fence == NULL);
- if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
- ret = vmw_event_fence_action_create(file_priv, fence,
- arg->flags,
- arg->user_data,
- true);
- else
- ret = vmw_event_fence_action_create(file_priv, fence,
- arg->flags,
- arg->user_data,
- true);
-
+ ret = vmw_event_fence_action_create(file_priv, fence,
+ arg->flags,
+ arg->user_data,
+ true);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to attach event to fence.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 3eb1486..8966493 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -163,8 +163,9 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
- vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+ ;
dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index fc43c06..dab6fab 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1939,6 +1939,14 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
};
int i;
+ u32 assumed_bpp = 2;
+
+ /*
+ * If using screen objects, then assume 32-bpp because that's what the
+ * SVGA device is assuming
+ */
+ if (dev_priv->sou_priv)
+ assumed_bpp = 4;
/* Add preferred mode */
{
@@ -1949,8 +1957,9 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
mode->vdisplay = du->pref_height;
vmw_guess_mode_timing(mode);
- if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
- mode->vdisplay)) {
+ if (vmw_kms_validate_mode_vram(dev_priv,
+ mode->hdisplay * assumed_bpp,
+ mode->vdisplay)) {
drm_mode_probed_add(connector, mode);
} else {
drm_mode_destroy(dev, mode);
@@ -1972,7 +1981,8 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
bmode->vdisplay > max_height)
continue;
- if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
+ if (!vmw_kms_validate_mode_vram(dev_priv,
+ bmode->hdisplay * assumed_bpp,
bmode->vdisplay))
continue;
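The old check hard-coded 2 bytes per pixel when asking whether a candidate mode fits in VRAM; with screen objects the SVGA device assumes 32 bpp, so the validation now multiplies by assumed_bpp (4 bytes) instead. The difference is easy to see with a standalone calculation, assuming a hypothetical 6 MiB of VRAM:

/* Standalone illustration (not driver code): does a mode fit in VRAM? */
#include <stdint.h>
#include <stdio.h>

static int mode_fits(uint32_t width, uint32_t height, uint32_t bytes_pp,
		     uint64_t vram_size)
{
	return (uint64_t)width * bytes_pp * height <= vram_size;
}

int main(void)
{
	uint64_t vram = 6 * 1024 * 1024;	/* hypothetical 6 MiB of VRAM */

	/* 1920x1080: ~4.0 MiB at 2 bytes/px, ~7.9 MiB at 4 bytes/px */
	printf("16 bpp assumption: fits=%d\n", mode_fits(1920, 1080, 2, vram));
	printf("32 bpp assumption: fits=%d\n", mode_fits(1920, 1080, 4, vram));
	return 0;
}

Under the old factor a 1920x1080 mode passes (about 4 MiB), even though scanning it out at 32 bpp needs close to 8 MiB and does not fit.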
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index b592eef..b083509 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -48,7 +48,7 @@ static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
unsigned long reg;
int i, id;
- for (i = 0; i <= BIT_WORD(host->info->nb_pts); i++) {
+ for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) {
reg = host1x_sync_readl(host,
HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
for_each_set_bit(id, &reg, BITS_PER_LONG) {
@@ -65,7 +65,7 @@ static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
{
u32 i;
- for (i = 0; i <= BIT_WORD(host->info->nb_pts); ++i) {
+ for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); ++i) {
host1x_sync_writel(host, 0xffffffffu,
HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
host1x_sync_writel(host, 0xffffffffu,
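Each syncpoint threshold register covers 32 syncpoints, so the number of registers to walk is DIV_ROUND_UP(nb_pts, 32). The previous bound, i <= BIT_WORD(nb_pts), divides by BITS_PER_LONG instead, so on a 32-bit kernel with 32 syncpoints the loop touches two registers where only one exists. A standalone check of the two bounds (assuming BITS_PER_LONG is 32 and 32 syncpoints):

/* Standalone illustration of the loop-bound fix (not kernel code). */
#include <stdio.h>

#define BITS_PER_LONG	32			/* assume a 32-bit kernel */
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int nb_pts = 32;	/* syncpoints; each register covers 32 */

	/* old bound "i <= BIT_WORD(nb_pts)" walks one register too many */
	printf("old bound: %u register(s)\n", BIT_WORD(nb_pts) + 1);

	/* new bound "i < DIV_ROUND_UP(nb_pts, 32)" walks exactly enough */
	printf("new bound: %u register(s)\n", DIV_ROUND_UP(nb_pts, 32));
	return 0;
}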
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index ec0ae2d..37ac7b5 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -623,7 +623,8 @@ static int vga_switcheroo_runtime_suspend(struct device *dev)
ret = dev->bus->pm->runtime_suspend(dev);
if (ret)
return ret;
-
+ if (vgasr_priv.handler->switchto)
+ vgasr_priv.handler->switchto(VGA_SWITCHEROO_IGD);
vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF);
return 0;
}
@@ -659,6 +660,12 @@ int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *
}
EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops);
+void vga_switcheroo_fini_domain_pm_ops(struct device *dev)
+{
+ dev->pm_domain = NULL;
+}
+EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops);
+
static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);