Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 27
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_ddc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 19
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 13
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmiphy.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 101
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 159
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 65
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 38
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 46
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/falcon.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/vp/nve0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/xtensa.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/falcon.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/vm.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/priv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/base.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 69
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c | 14
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c | 42
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 70
-rw-r--r--  drivers/gpu/drm/qxl/qxl_draw.c | 263
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 76
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c | 184
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fence.c | 10
-rw-r--r--  drivers/gpu/drm/qxl/qxl_gem.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_image.c | 111
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ioctl.c | 319
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c | 70
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.h | 6
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 212
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 43
-rw-r--r--  drivers/gpu/drm/radeon/btc_dpm.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 73
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 16
-rw-r--r--  drivers/gpu/drm/radeon/cypress_dpm.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 198
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 16
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 146
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 43
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 159
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 182
-rw-r--r--  drivers/gpu/drm/radeon/rs780_dpm.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/rs780d.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/rv6xx_dpm.c | 77
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c | 47
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 331
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 100
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 14
-rw-r--r--  drivers/gpu/drm/radeon/sumo_dpm.c | 14
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.c | 9
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.c | 9
119 files changed, 2762 insertions(+), 1328 deletions(-)
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 98d6708..6e8887fe 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -323,6 +323,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
astbo->gem.driver_private = NULL;
astbo->bo.bdev = &ast->ttm.bdev;
+ astbo->bo.bdev->dev_mapping = dev->dev_mapping;
ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 0047012..69fd8f1 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -328,6 +328,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
cirrusbo->gem.driver_private = NULL;
cirrusbo->bo.bdev = &cirrus->ttm.bdev;
+ cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 738a429..6a64749 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -677,6 +677,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
/* don't break so fail path works correct */
fail = 1;
break;
+
+ if (connector->dpms != DRM_MODE_DPMS_ON) {
+ DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
+ mode_changed = true;
+ }
}
}
@@ -754,6 +759,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
ret = -EINVAL;
goto fail;
}
+ DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+ for (i = 0; i < set->num_connectors; i++) {
+ DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+ drm_get_connector_name(set->connectors[i]));
+ set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
+ }
}
drm_helper_disable_unused_functions(dev);
} else if (fb_changed) {
@@ -771,22 +782,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
}
}
- /*
- * crtc set_config helpers implicit set the crtc and all connected
- * encoders to DPMS on for a full mode set. But for just an fb update it
- * doesn't do that. To not confuse userspace, do an explicit DPMS_ON
- * unconditionally. This will also ensure driver internal dpms state is
- * consistent again.
- */
- if (set->crtc->enabled) {
- DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
- for (i = 0; i < set->num_connectors; i++) {
- DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
- drm_get_connector_name(set->connectors[i]));
- set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
- }
- }
-
kfree(save_connectors);
kfree(save_encoders);
kfree(save_crtcs);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 8bcce78..f92da0a 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -708,7 +708,10 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
/* Subtract time delta from raw timestamp to get final
* vblank_time timestamp for end of vblank.
*/
- etime = ktime_sub_ns(etime, delta_ns);
+ if (delta_ns < 0)
+ etime = ktime_add_ns(etime, -delta_ns);
+ else
+ etime = ktime_sub_ns(etime, delta_ns);
*vblank_time = ktime_to_timeval(etime);
DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 95c75ed..30ef41b 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/i2c.h>
-#include <linux/module.h>
#include "exynos_drm_drv.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 61b094f..6e047bd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -12,7 +12,6 @@
*
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 3e106be..1c263da 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -14,7 +14,6 @@
#include <drm/drmP.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of_device.h>
@@ -130,7 +129,6 @@ static const struct of_device_id fimd_driver_dt_match[] = {
.data = &exynos5_fimd_driver_data },
{},
};
-MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
#endif
static inline struct fimd_driver_data *drm_fimd_get_driver_data(
@@ -1082,7 +1080,6 @@ static struct platform_device_id fimd_driver_ids[] = {
},
{},
};
-MODULE_DEVICE_TABLE(platform, fimd_driver_ids);
static const struct dev_pm_ops fimd_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 42a5a54..eddea49 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -8,7 +8,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -806,9 +805,20 @@ static void g2d_dma_start(struct g2d_data *g2d,
struct g2d_cmdlist_node *node =
list_first_entry(&runqueue_node->run_cmdlist,
struct g2d_cmdlist_node, list);
+ int ret;
+
+ ret = pm_runtime_get_sync(g2d->dev);
+ if (ret < 0) {
+ dev_warn(g2d->dev, "failed pm power on.\n");
+ return;
+ }
- pm_runtime_get_sync(g2d->dev);
- clk_enable(g2d->gate_clk);
+ ret = clk_prepare_enable(g2d->gate_clk);
+ if (ret < 0) {
+ dev_warn(g2d->dev, "failed to enable clock.\n");
+ pm_runtime_put_sync(g2d->dev);
+ return;
+ }
writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
@@ -861,7 +871,7 @@ static void g2d_runqueue_worker(struct work_struct *work)
runqueue_work);
mutex_lock(&g2d->runqueue_mutex);
- clk_disable(g2d->gate_clk);
+ clk_disable_unprepare(g2d->gate_clk);
pm_runtime_put_sync(g2d->dev);
complete(&g2d->runqueue_node->complete);
@@ -1521,7 +1531,6 @@ static const struct of_device_id exynos_g2d_match[] = {
{ .compatible = "samsung,exynos5250-g2d" },
{},
};
-MODULE_DEVICE_TABLE(of, exynos_g2d_match);
#endif
struct platform_driver g2d_driver = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 472e3b2..90b8a1a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -12,7 +12,6 @@
*
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index aaa550d..8d3bc01 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/wait.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index b1ef8e7..d2b6ab4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -12,7 +12,6 @@
*
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
@@ -342,10 +341,10 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
*/
ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
prop_list->ipp_id);
- if (!ippdrv) {
+ if (IS_ERR(ippdrv)) {
DRM_ERROR("not found ipp%d driver.\n",
prop_list->ipp_id);
- return -EINVAL;
+ return PTR_ERR(ippdrv);
}
prop_list = ippdrv->prop_list;
@@ -970,9 +969,9 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
/* find command node */
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
qbuf->prop_id);
- if (!c_node) {
+ if (IS_ERR(c_node)) {
DRM_ERROR("failed to get command node.\n");
- return -EFAULT;
+ return PTR_ERR(c_node);
}
/* buffer control */
@@ -1106,9 +1105,9 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
cmd_ctrl->prop_id);
- if (!c_node) {
+ if (IS_ERR(c_node)) {
DRM_ERROR("invalid command node list.\n");
- return -EINVAL;
+ return PTR_ERR(c_node);
}
if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 427640a..49669aa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -10,7 +10,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 41cc74d..c57c565 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -13,7 +13,6 @@
#include <drm/drmP.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <drm/exynos_drm.h>
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 62ef597..2f5c694 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -24,7 +24,6 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/i2c.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index ef04255..6e320ae 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/i2c.h>
-#include <linux/module.h>
#include "exynos_drm_drv.h"
#include "exynos_hdmi.h"
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 42ffb71..c9a137c 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -23,7 +23,6 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/i2c.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index adb319b..f466980 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1495,6 +1495,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->dev = dev;
dev_priv->info = info;
+ spin_lock_init(&dev_priv->irq_lock);
+ spin_lock_init(&dev_priv->gpu_error.lock);
+ spin_lock_init(&dev_priv->rps.lock);
+ spin_lock_init(&dev_priv->gt_lock);
+ spin_lock_init(&dev_priv->backlight.lock);
+ mutex_init(&dev_priv->dpio_lock);
+ mutex_init(&dev_priv->rps.hw_lock);
+ mutex_init(&dev_priv->modeset_restore_lock);
+
i915_dump_device_info(dev_priv);
if (i915_get_bridge_dev(dev)) {
@@ -1585,6 +1594,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_detect_pch(dev);
intel_irq_init(dev);
+ intel_pm_init(dev);
+ intel_gt_sanitize(dev);
intel_gt_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1610,15 +1621,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
- spin_lock_init(&dev_priv->irq_lock);
- spin_lock_init(&dev_priv->gpu_error.lock);
- spin_lock_init(&dev_priv->rps.lock);
- spin_lock_init(&dev_priv->backlight.lock);
- mutex_init(&dev_priv->dpio_lock);
-
- mutex_init(&dev_priv->rps.hw_lock);
- mutex_init(&dev_priv->modeset_restore_lock);
-
dev_priv->num_plane = 1;
if (IS_VALLEYVIEW(dev))
dev_priv->num_plane = 2;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 062cbda..45b3c03 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -123,10 +123,10 @@ module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
"Enable preliminary hardware support. (default: false)");
-int i915_disable_power_well __read_mostly = 0;
+int i915_disable_power_well __read_mostly = 1;
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
MODULE_PARM_DESC(disable_power_well,
- "Disable the power well when possible (default: false)");
+ "Disable the power well when possible (default: true)");
int i915_enable_ips __read_mostly = 1;
module_param_named(enable_ips, i915_enable_ips, int, 0600);
@@ -706,7 +706,7 @@ static int i915_drm_thaw(struct drm_device *dev)
{
int error = 0;
- intel_gt_reset(dev);
+ intel_gt_sanitize(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
@@ -732,7 +732,7 @@ int i915_resume(struct drm_device *dev)
pci_set_master(dev->pdev);
- intel_gt_reset(dev);
+ intel_gt_sanitize(dev);
/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
@@ -1253,21 +1253,21 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+ unsigned long irqflags; \
u##x val = 0; \
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (IS_GEN5(dev_priv->dev)) \
ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- unsigned long irqflags; \
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_get(dev_priv); \
val = read##y(dev_priv->regs + reg); \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_put(dev_priv); \
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val)); \
return val; \
}
@@ -1280,8 +1280,10 @@ __i915_read(64, q)
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+ unsigned long irqflags; \
u32 __fifo_ret = 0; \
trace_i915_reg_rw(true, reg, val, sizeof(val)); \
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
@@ -1293,6 +1295,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
hsw_unclaimed_reg_check(dev_priv, reg); \
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
}
__i915_write(8, b)
__i915_write(16, w)
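The __i915_read/__i915_write changes above widen the gt_lock critical section to cover the Gen5 dummy write, the forcewake decision and the MMIO access itself, where previously only the forcewake branch was locked. A simplified userspace analog of the patched read path, with a pthread mutex standing in for the spinlock and all names hypothetical:

#include <pthread.h>
#include <stdint.h>

struct fake_gpu {
	pthread_mutex_t gt_lock;	/* plays the role of dev_priv->gt_lock */
	int forcewake_count;		/* references held by long-term users */
	int hw_awake;
	uint32_t regs[256];
};

static void force_wake_get(struct fake_gpu *g) { g->hw_awake = 1; }
static void force_wake_put(struct fake_gpu *g) { g->hw_awake = 0; }
static int needs_force_wake(uint32_t reg) { return reg < 0x40; }

static uint32_t gpu_read32(struct fake_gpu *g, uint32_t reg)
{
	uint32_t val;

	/* Lock around the whole sequence: another thread can no longer
	 * drop forcewake between our wake-up and the actual read. */
	pthread_mutex_lock(&g->gt_lock);
	if (needs_force_wake(reg)) {
		if (g->forcewake_count == 0)
			force_wake_get(g);
		val = g->regs[reg];
		if (g->forcewake_count == 0)
			force_wake_put(g);
	} else {
		val = g->regs[reg];
	}
	pthread_mutex_unlock(&g->gt_lock);
	return val;
}

int main(void)
{
	struct fake_gpu g = { .gt_lock = PTHREAD_MUTEX_INITIALIZER };

	g.regs[0x10] = 0xdeadbeef;
	return gpu_read32(&g, 0x10) != 0xdeadbeef;
}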
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a416645..1929bff 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -555,6 +555,7 @@ enum intel_sbi_destination {
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
struct intel_fbdev;
struct intel_fbc_work;
@@ -1581,9 +1582,10 @@ void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
+extern void intel_pm_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
-extern void intel_gt_reset(struct drm_device *dev);
+extern void intel_gt_sanitize(struct drm_device *dev);
void i915_error_state_free(struct kref *error_ref);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4200c32..d9e2208 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1880,6 +1880,10 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
u32 seqno = intel_ring_get_seqno(ring);
BUG_ON(ring == NULL);
+ if (obj->ring != ring && obj->last_write_seqno) {
+ /* Keep the seqno relative to the current ring */
+ obj->last_write_seqno = seqno;
+ }
obj->ring = ring;
/* Add a reference if we're newly entering the active list. */
@@ -2254,7 +2258,17 @@ void i915_gem_restore_fences(struct drm_device *dev)
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
- i915_gem_write_fence(dev, i, reg->obj);
+
+ /*
+ * Commit delayed tiling changes if we have an object still
+ * attached to the fence, otherwise just clear the fence.
+ */
+ if (reg->obj) {
+ i915_gem_object_update_fence(reg->obj, reg,
+ reg->obj->tiling_mode);
+ } else {
+ i915_gem_write_fence(dev, i, NULL);
+ }
}
}
@@ -2653,7 +2667,6 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
drm_i915_private_t *dev_priv = dev->dev_private;
int fence_reg;
int fence_pitch_shift;
- uint64_t val;
if (INTEL_INFO(dev)->gen >= 6) {
fence_reg = FENCE_REG_SANDYBRIDGE_0;
@@ -2663,8 +2676,23 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
}
+ fence_reg += reg * 8;
+
+ /* To w/a incoherency with non-atomic 64-bit register updates,
+ * we split the 64-bit update into two 32-bit writes. In order
+ * for a partial fence not to be evaluated between writes, we
+ * precede the update with write to turn off the fence register,
+ * and only enable the fence as the last step.
+ *
+ * For extra levels of paranoia, we make sure each step lands
+ * before applying the next step.
+ */
+ I915_WRITE(fence_reg, 0);
+ POSTING_READ(fence_reg);
+
if (obj) {
u32 size = obj->gtt_space->size;
+ uint64_t val;
val = (uint64_t)((obj->gtt_offset + size - 4096) &
0xfffff000) << 32;
@@ -2673,12 +2701,16 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
- } else
- val = 0;
- fence_reg += reg * 8;
- I915_WRITE64(fence_reg, val);
- POSTING_READ(fence_reg);
+ I915_WRITE(fence_reg + 4, val >> 32);
+ POSTING_READ(fence_reg + 4);
+
+ I915_WRITE(fence_reg + 0, val);
+ POSTING_READ(fence_reg);
+ } else {
+ I915_WRITE(fence_reg + 4, 0);
+ POSTING_READ(fence_reg + 4);
+ }
}
static void i915_write_fence_reg(struct drm_device *dev, int reg,
@@ -2773,6 +2805,10 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
mb();
+ WARN(obj && (!obj->stride || !obj->tiling_mode),
+ "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+ obj->stride, obj->tiling_mode);
+
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
@@ -2796,56 +2832,17 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
return fence - dev_priv->fence_regs;
}
-struct write_fence {
- struct drm_device *dev;
- struct drm_i915_gem_object *obj;
- int fence;
-};
-
-static void i915_gem_write_fence__ipi(void *data)
-{
- struct write_fence *args = data;
-
- /* Required for SNB+ with LLC */
- wbinvd();
-
- /* Required for VLV */
- i915_gem_write_fence(args->dev, args->fence, args->obj);
-}
-
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- struct write_fence args = {
- .dev = obj->base.dev,
- .fence = fence_number(dev_priv, fence),
- .obj = enable ? obj : NULL,
- };
-
- /* In order to fully serialize access to the fenced region and
- * the update to the fence register we need to take extreme
- * measures on SNB+. In theory, the write to the fence register
- * flushes all memory transactions before, and coupled with the
- * mb() placed around the register write we serialise all memory
- * operations with respect to the changes in the tiler. Yet, on
- * SNB+ we need to take a step further and emit an explicit wbinvd()
- * on each processor in order to manually flush all memory
- * transactions before updating the fence register.
- *
- * However, Valleyview complicates matter. There the wbinvd is
- * insufficient and unlike SNB/IVB requires the serialising
- * register write. (Note that that register write by itself is
- * conversely not sufficient for SNB+.) To compromise, we do both.
- */
- if (INTEL_INFO(args.dev)->gen >= 6)
- on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
- else
- i915_gem_write_fence(args.dev, args.fence, args.obj);
+ int reg = fence_number(dev_priv, fence);
+
+ i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
if (enable) {
- obj->fence_reg = args.fence;
+ obj->fence_reg = reg;
fence->obj = obj;
list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
} else {
@@ -2853,6 +2850,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
fence->obj = NULL;
list_del_init(&fence->lru_list);
}
+ obj->fence_dirty = false;
}
static int
@@ -2982,7 +2980,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
return 0;
i915_gem_object_update_fence(obj, reg, enable);
- obj->fence_dirty = false;
return 0;
}
@@ -4611,7 +4608,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list)
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
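The i965_write_fence_reg() rework earlier in this i915_gem.c diff replaces a single 64-bit register write with an off/high/low sequence, so the GPU can never sample a fence that is half old, half new. A userspace sketch of just the ordering, with a plain array standing in for the MMIO pair (the real patch additionally posts each write with a read-back):

#include <stdint.h>
#include <stdio.h>

#define FENCE_VALID 1u

static uint32_t reg[2];	/* reg[0] = low dword (carries VALID), reg[1] = high */

static void write_fence(uint64_t val)
{
	reg[0] = 0;			/* step 1: fence off; a lookup now
					 * sees "disabled", never a mix */
	reg[1] = (uint32_t)(val >> 32);	/* step 2: new high dword */
	reg[0] = (uint32_t)val;		/* step 3: low dword, VALID last */
}

int main(void)
{
	write_fence(((uint64_t)0xfffff000 << 32) | 0x1000 | FENCE_VALID);
	printf("fence = %08x%08x\n", reg[1], reg[0]);
	return 0;
}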
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index dc53a52..9e65783 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -85,9 +85,17 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *sg,
enum dma_data_direction dir)
{
+ struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+
+ mutex_lock(&obj->base.dev->struct_mutex);
+
dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
sg_free_table(sg);
kfree(sg);
+
+ i915_gem_object_unpin_pages(obj);
+
+ mutex_unlock(&obj->base.dev->struct_mutex);
}
static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f2326fc..6f51429 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1856,10 +1856,16 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114)
-/* HDMI/DP bits are gen4+ */
-#define PORTB_HOTPLUG_LIVE_STATUS (1 << 29)
+/*
+ * HDMI/DP bits are gen4+
+ *
+ * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
+ * Please check the detailed lore in the commit message for experimental
+ * evidence.
+ */
+#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29)
#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
-#define PORTD_HOTPLUG_LIVE_STATUS (1 << 27)
+#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 324211a..b042ee5 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -301,7 +301,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);
- intel_dp->DP = intel_dig_port->port_reversal |
+ intel_dp->DP = intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
@@ -1109,7 +1109,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
* enabling the port.
*/
I915_WRITE(DDI_BUF_CTL(port),
- intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
+ intel_dig_port->saved_port_bits |
+ DDI_BUF_CTL_ENABLE);
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1347,8 +1348,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->get_config = intel_ddi_get_config;
intel_dig_port->port = port;
- intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
- DDI_BUF_PORT_REVERSAL;
+ intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+ (DDI_BUF_PORT_REVERSAL |
+ DDI_A_4_LANES);
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 85f3eb7..be79f47 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4913,22 +4913,19 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
uint32_t tmp;
tmp = I915_READ(PFIT_CONTROL);
+ if (!(tmp & PFIT_ENABLE))
+ return;
+ /* Check whether the pfit is attached to our pipe. */
if (INTEL_INFO(dev)->gen < 4) {
if (crtc->pipe != PIPE_B)
return;
-
- /* gen2/3 store dither state in pfit control, needs to match */
- pipe_config->gmch_pfit.control = tmp & PANEL_8TO6_DITHER_ENABLE;
} else {
if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
return;
}
- if (!(tmp & PFIT_ENABLE))
- return;
-
- pipe_config->gmch_pfit.control = I915_READ(PFIT_CONTROL);
+ pipe_config->gmch_pfit.control = tmp;
pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
if (INTEL_INFO(dev)->gen < 5)
pipe_config->gmch_pfit.lvds_border_bits =
@@ -8272,9 +8269,11 @@ check_crtc_state(struct drm_device *dev)
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
+ enum pipe pipe;
if (encoder->base.crtc != &crtc->base)
continue;
- if (encoder->get_config)
+ if (encoder->get_config &&
+ encoder->get_hw_state(encoder, &pipe))
encoder->get_config(encoder, &pipe_config);
}
@@ -8317,6 +8316,8 @@ check_shared_dpll_state(struct drm_device *dev)
pll->active, pll->refcount);
WARN(pll->active && !pll->on,
"pll in active use but not on in sw tracking\n");
+ WARN(pll->on && !pll->active,
+ "pll in on but not on in use in sw tracking\n");
WARN(pll->on != active,
"pll on state mismatch (expected %i, found %i)\n",
pll->on, active);
@@ -8541,15 +8542,20 @@ static void intel_set_config_restore_state(struct drm_device *dev,
}
static bool
-is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors,
- int num_connectors)
+is_crtc_connector_off(struct drm_mode_set *set)
{
int i;
- for (i = 0; i < num_connectors; i++)
- if (connectors[i].encoder &&
- connectors[i].encoder->crtc == crtc &&
- connectors[i].dpms != DRM_MODE_DPMS_ON)
+ if (set->num_connectors == 0)
+ return false;
+
+ if (WARN_ON(set->connectors == NULL))
+ return false;
+
+ for (i = 0; i < set->num_connectors; i++)
+ if (set->connectors[i]->encoder &&
+ set->connectors[i]->encoder->crtc == set->crtc &&
+ set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
return true;
return false;
@@ -8562,10 +8568,8 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
- if (set->connectors != NULL &&
- is_crtc_connector_off(set->crtc, *set->connectors,
- set->num_connectors)) {
- config->mode_changed = true;
+ if (is_crtc_connector_off(set)) {
+ config->mode_changed = true;
} else if (set->crtc->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->fb == NULL) {
@@ -9398,6 +9402,17 @@ static void quirk_invert_brightness(struct drm_device *dev)
DRM_INFO("applying inverted panel brightness quirk\n");
}
+/*
+ * Some machines (Dell XPS13) suffer broken backlight controls if
+ * BLM_PCH_PWM_ENABLE is set.
+ */
+static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
+ DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -9467,6 +9482,11 @@ static struct intel_quirk intel_quirks[] = {
/* Acer Aspire 4736Z */
{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+ /* Dell XPS13 HD Sandy Bridge */
+ { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
+ /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
+ { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
};
static void intel_init_quirks(struct drm_device *dev)
@@ -9817,8 +9837,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
pll->refcount = pll->active;
- DRM_DEBUG_KMS("%s hw state readout: refcount %i\n",
- pll->name, pll->refcount);
+ DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
+ pll->name, pll->refcount, pll->on);
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -9869,6 +9889,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_plane *plane;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
+ int i;
intel_modeset_readout_hw_state(dev);
@@ -9884,6 +9905,18 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
}
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+ if (!pll->on || pll->active)
+ continue;
+
+ DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
+
+ pll->disable(dev_priv, pll);
+ pll->on = false;
+ }
+
if (force_restore) {
/*
* We need to use raw interfaces for restoring state to avoid
@@ -10009,6 +10042,8 @@ struct intel_display_error_state {
u32 power_well_driver;
+ int num_transcoders;
+
struct intel_cursor_error_state {
u32 control;
u32 position;
@@ -10017,16 +10052,7 @@ struct intel_display_error_state {
} cursor[I915_MAX_PIPES];
struct intel_pipe_error_state {
- enum transcoder cpu_transcoder;
- u32 conf;
u32 source;
-
- u32 htotal;
- u32 hblank;
- u32 hsync;
- u32 vtotal;
- u32 vblank;
- u32 vsync;
} pipe[I915_MAX_PIPES];
struct intel_plane_error_state {
@@ -10038,6 +10064,19 @@ struct intel_display_error_state {
u32 surface;
u32 tile_offset;
} plane[I915_MAX_PIPES];
+
+ struct intel_transcoder_error_state {
+ enum transcoder cpu_transcoder;
+
+ u32 conf;
+
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ } transcoder[4];
};
struct intel_display_error_state *
@@ -10045,9 +10084,17 @@ intel_display_capture_error_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
- enum transcoder cpu_transcoder;
+ int transcoders[] = {
+ TRANSCODER_A,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP,
+ };
int i;
+ if (INTEL_INFO(dev)->num_pipes == 0)
+ return NULL;
+
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (error == NULL)
return NULL;
@@ -10056,9 +10103,6 @@ intel_display_capture_error_state(struct drm_device *dev)
error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
for_each_pipe(i) {
- cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
- error->pipe[i].cpu_transcoder = cpu_transcoder;
-
if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
@@ -10082,14 +10126,25 @@ intel_display_capture_error_state(struct drm_device *dev)
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
- error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
error->pipe[i].source = I915_READ(PIPESRC(i));
- error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
- error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
- error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
- error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
- error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
- error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+ }
+
+ error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+ if (HAS_DDI(dev_priv->dev))
+ error->num_transcoders++; /* Account for eDP. */
+
+ for (i = 0; i < error->num_transcoders; i++) {
+ enum transcoder cpu_transcoder = transcoders[i];
+
+ error->transcoder[i].cpu_transcoder = cpu_transcoder;
+
+ error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
+ error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+ error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+ error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+ error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+ error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+ error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
}
/* In the code above we read the registers without checking if the power
@@ -10111,22 +10166,16 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
{
int i;
+ if (!error)
+ return;
+
err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
if (HAS_POWER_WELL(dev))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
for_each_pipe(i) {
err_printf(m, "Pipe [%d]:\n", i);
- err_printf(m, " CPU transcoder: %c\n",
- transcoder_name(error->pipe[i].cpu_transcoder));
- err_printf(m, " CONF: %08x\n", error->pipe[i].conf);
err_printf(m, " SRC: %08x\n", error->pipe[i].source);
- err_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
- err_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
- err_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
- err_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
- err_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
- err_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
err_printf(m, "Plane [%d]:\n", i);
err_printf(m, " CNTR: %08x\n", error->plane[i].control);
@@ -10147,5 +10196,17 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " POS: %08x\n", error->cursor[i].position);
err_printf(m, " BASE: %08x\n", error->cursor[i].base);
}
+
+ for (i = 0; i < error->num_transcoders; i++) {
+ err_printf(m, " CPU transcoder: %c\n",
+ transcoder_name(error->transcoder[i].cpu_transcoder));
+ err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
+ err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
+ err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
+ err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
+ err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
+ err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
+ err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
+ }
}
#endif
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index b739712..26e162b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -75,7 +75,12 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
case DP_LINK_BW_1_62:
case DP_LINK_BW_2_7:
break;
+ case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
+ max_link_bw = DP_LINK_BW_2_7;
+ break;
default:
+ WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
+ max_link_bw);
max_link_bw = DP_LINK_BW_1_62;
break;
}
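The intel_dp.c hunk above clamps rather than rejects a DP 1.2 sink that advertises a 5.4 GHz link, since the source here can only drive 2.7, and anything unrecognized falls back to 1.62. A self-contained sketch of the clamp using the standard DPCD encodings (constants copied from drm_dp_helper.h):

#include <stdint.h>
#include <stdio.h>

#define DP_LINK_BW_1_62 0x06
#define DP_LINK_BW_2_7  0x0a
#define DP_LINK_BW_5_4  0x14

static uint8_t clamp_link_bw(uint8_t bw)
{
	switch (bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
		return bw;		/* rates the source can drive */
	case DP_LINK_BW_5_4:
		return DP_LINK_BW_2_7;	/* DP 1.2 sink, cap at 2.7 */
	default:
		return DP_LINK_BW_1_62;	/* bogus DPCD, use the floor */
	}
}

int main(void)
{
	printf("0x%02x\n", clamp_link_bw(DP_LINK_BW_5_4));	/* 0x0a */
	return 0;
}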
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index c8c9b6f..b7d6e09 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -504,7 +504,7 @@ struct intel_dp {
struct intel_digital_port {
struct intel_encoder base;
enum port port;
- u32 port_reversal;
+ u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
};
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 98df2a0..2fd3fd5 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -785,10 +785,22 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
}
}
+static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+{
+ struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+
+ if (IS_G4X(dev))
+ return 165000;
+ else if (IS_HASWELL(dev))
+ return 300000;
+ else
+ return 225000;
+}
+
static int intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- if (mode->clock > 165000)
+ if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
return MODE_CLOCK_LOW;
@@ -806,6 +818,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
+ int portclock_limit = hdmi_portclock_limit(intel_hdmi);
int desired_bpp;
if (intel_hdmi->color_range_auto) {
@@ -829,7 +842,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
* outputs. We also need to check that the higher clock still fits
* within limits.
*/
- if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= 225000
+ if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
&& HAS_PCH_SPLIT(dev)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
@@ -846,7 +859,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = desired_bpp;
}
- if (adjusted_mode->clock > 225000) {
+ if (adjusted_mode->clock > portclock_limit) {
DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 021e8da..61348ea 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -109,6 +109,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_PVSYNC;
pipe_config->adjusted_mode.flags |= flags;
+
+ /* gen2/3 store dither state in pfit control, needs to match */
+ if (INTEL_INFO(dev)->gen < 4) {
+ tmp = I915_READ(PFIT_CONTROL);
+
+ pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+ }
}
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -290,14 +297,11 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
intel_pch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
- return true;
} else {
intel_gmch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
- }
- drm_mode_set_crtcinfo(adjusted_mode, 0);
- pipe_config->timings_set = true;
+ }
/*
* XXX: It would be nice to support lower refresh rates on the
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 80bea1d..5950888 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -194,6 +194,9 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
adjusted_mode->vdisplay == mode->vdisplay)
goto out;
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+ pipe_config->timings_set = true;
+
switch (fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
@@ -494,8 +497,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
goto out;
}
- /* scale to hardware */
- level = level * freq / max;
+ /* scale to hardware, but be careful to not overflow */
+ if (freq < max)
+ level = level * freq / max;
+ else
+ level = freq / max * level;
dev_priv->backlight.level = level;
if (dev_priv->backlight.device)
@@ -512,6 +518,17 @@ void intel_panel_disable_backlight(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
+ /*
+ * Do not disable backlight on the vgaswitcheroo path. When switching
+ * away from i915, the other client may depend on i915 to handle the
+ * backlight. This will leave the backlight on unnecessarily when
+ * another client is not activated.
+ */
+ if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+ DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
+ return;
+ }
+
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
dev_priv->backlight.enabled = false;
@@ -580,7 +597,8 @@ void intel_panel_enable_backlight(struct drm_device *dev,
POSTING_READ(reg);
I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev) &&
+ !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
tmp = I915_READ(BLC_PWM_PCH_CTL1);
tmp |= BLM_PCH_PWM_ENABLE;
tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ccbdd83..b0e4a0b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5063,8 +5063,26 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
}
} else {
if (enable_requested) {
+ unsigned long irqflags;
+ enum pipe p;
+
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+ POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Requesting to disable the power well\n");
+
+ /*
+ * After this, the registers on the pipes that are part
+ * of the power well will become zero, so we have to
+ * adjust our counters according to that.
+ *
+ * FIXME: Should we do this in general in
+ * drm_vblank_post_modeset?
+ */
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ for_each_pipe(p)
+ if (p != PIPE_A)
+ dev->last_vblank[p] = 0;
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
}
}
@@ -5476,7 +5494,7 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
gen6_gt_check_fifodbg(dev_priv);
}
-void intel_gt_reset(struct drm_device *dev)
+void intel_gt_sanitize(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5487,26 +5505,61 @@ void intel_gt_reset(struct drm_device *dev)
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
__gen6_gt_force_wake_mt_reset(dev_priv);
}
+
+ /* BIOS often leaves RC6 enabled, but disable it for hw init */
+ if (INTEL_INFO(dev)->gen >= 6)
+ intel_disable_gt_powersave(dev);
}
void intel_gt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- spin_lock_init(&dev_priv->gt_lock);
-
- intel_gt_reset(dev);
-
if (IS_VALLEYVIEW(dev)) {
dev_priv->gt.force_wake_get = vlv_force_wake_get;
dev_priv->gt.force_wake_put = vlv_force_wake_put;
- } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ } else if (IS_HASWELL(dev)) {
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
+ } else if (IS_IVYBRIDGE(dev)) {
+ u32 ecobus;
+
+ /* IVB configs may use multi-threaded forcewake */
+
+ /* A small trick here - if the bios hasn't configured
+ * MT forcewake, and if the device is in RC6, then
+ * force_wake_mt_get will not wake the device and the
+ * ECOBUS read will return zero. Which will be
+ * (correctly) interpreted by the test below as MT
+ * forcewake being disabled.
+ */
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ_NOTRACE(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ dev_priv->gt.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->gt.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ } else {
+ DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
+ DRM_INFO("when using vblank-synced partial screen updates.\n");
+ dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+ }
} else if (IS_GEN6(dev)) {
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
}
+}
+
+void intel_pm_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e51ab55..664118d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -379,6 +379,17 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
return I915_READ(acthd_reg);
}
+static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ u32 addr;
+
+ addr = dev_priv->status_page_dmah->busaddr;
+ if (INTEL_INFO(ring->dev)->gen >= 4)
+ addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+ I915_WRITE(HWS_PGA, addr);
+}
+
static int init_ring_common(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
@@ -390,6 +401,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
if (HAS_FORCE_WAKE(dev))
gen6_gt_force_wake_get(dev_priv);
+ if (I915_NEED_GFX_HWS(dev))
+ intel_ring_setup_status_page(ring);
+ else
+ ring_setup_phys_status_page(ring);
+
/* Stop the ring if it's running. */
I915_WRITE_CTL(ring, 0);
I915_WRITE_HEAD(ring, 0);
@@ -518,9 +534,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
struct pipe_control *pc = ring->private;
struct drm_i915_gem_object *obj;
- if (!ring->private)
- return;
-
obj = pc->obj;
kunmap(sg_page(obj->pages->sgl));
@@ -528,7 +541,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
drm_gem_object_unreference(&obj->base);
kfree(pc);
- ring->private = NULL;
}
static int init_render_ring(struct intel_ring_buffer *ring)
@@ -601,7 +613,10 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
if (HAS_BROKEN_CS_TLB(dev))
drm_gem_object_unreference(to_gem_object(ring->private));
- cleanup_pipe_control(ring);
+ if (INTEL_INFO(dev)->gen >= 5)
+ cleanup_pipe_control(ring);
+
+ ring->private = NULL;
}
static void
@@ -1223,7 +1238,6 @@ static int init_status_page(struct intel_ring_buffer *ring)
ring->status_page.obj = obj;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
- intel_ring_setup_status_page(ring);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
ring->name, ring->status_page.gfx_addr);
@@ -1237,10 +1251,9 @@ err:
return ret;
}
-static int init_phys_hws_pga(struct intel_ring_buffer *ring)
+static int init_phys_status_page(struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
- u32 addr;
if (!dev_priv->status_page_dmah) {
dev_priv->status_page_dmah =
@@ -1249,11 +1262,6 @@ static int init_phys_hws_pga(struct intel_ring_buffer *ring)
return -ENOMEM;
}
- addr = dev_priv->status_page_dmah->busaddr;
- if (INTEL_INFO(ring->dev)->gen >= 4)
- addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
- I915_WRITE(HWS_PGA, addr);
-
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
@@ -1281,7 +1289,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
return ret;
} else {
BUG_ON(ring->id != RCS);
- ret = init_phys_hws_pga(ring);
+ ret = init_phys_status_page(ring);
if (ret)
return ret;
}
@@ -1893,7 +1901,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
}
if (!I915_NEED_GFX_HWS(dev)) {
- ret = init_phys_hws_pga(ring);
+ ret = init_phys_status_page(ring);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 251784a..503a414 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -29,6 +29,7 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct mga_device *mdev = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
int i;
if (!crtc->enabled)
@@ -36,6 +37,28 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
WREG8(DAC_INDEX + MGA1064_INDEX, 0);
+ if (fb && fb->bits_per_pixel == 16) {
+ int inc = (fb->depth == 15) ? 8 : 4;
+ u8 r, b;
+ for (i = 0; i < MGAG200_LUT_SIZE; i += inc) {
+ if (fb->depth == 16) {
+ if (i > (MGAG200_LUT_SIZE >> 1)) {
+ r = b = 0;
+ } else {
+ r = mga_crtc->lut_r[i << 1];
+ b = mga_crtc->lut_b[i << 1];
+ }
+ } else {
+ r = mga_crtc->lut_r[i];
+ b = mga_crtc->lut_b[i];
+ }
+ /* VGA registers */
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, r);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, b);
+ }
+ return;
+ }
for (i = 0; i < MGAG200_LUT_SIZE; i++) {
/* VGA registers */
WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]);
@@ -877,7 +900,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8);
if (crtc->fb->bits_per_pixel == 24)
- pitch = pitch >> (4 - bppshift);
+ pitch = (pitch * 3) >> (4 - bppshift);
else
pitch = pitch >> (4 - bppshift);
@@ -1251,6 +1274,24 @@ static void mga_crtc_destroy(struct drm_crtc *crtc)
kfree(mga_crtc);
}
+static void mga_crtc_disable(struct drm_crtc *crtc)
+{
+ int ret;
+ DRM_DEBUG_KMS("\n");
+ mga_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (crtc->fb) {
+ struct mga_framebuffer *mga_fb = to_mga_framebuffer(crtc->fb);
+ struct drm_gem_object *obj = mga_fb->obj;
+ struct mgag200_bo *bo = gem_to_mga_bo(obj);
+ ret = mgag200_bo_reserve(bo, false);
+ if (ret)
+ return;
+ mgag200_bo_push_sysram(bo);
+ mgag200_bo_unreserve(bo);
+ }
+ crtc->fb = NULL;
+}
+
/* These provide the minimum set of functions required to handle a CRTC */
static const struct drm_crtc_funcs mga_crtc_funcs = {
.cursor_set = mga_crtc_cursor_set,
@@ -1261,6 +1302,7 @@ static const struct drm_crtc_funcs mga_crtc_funcs = {
};
static const struct drm_crtc_helper_funcs mga_helper_funcs = {
+ .disable = mga_crtc_disable,
.dpms = mga_crtc_dpms,
.mode_fixup = mga_crtc_mode_fixup,
.mode_set = mga_crtc_mode_set,
@@ -1581,6 +1623,8 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev)
drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
+ drm_sysfs_connector_add(connector);
+
mga_connector->i2c = mgag200_i2c_create(dev);
if (!mga_connector->i2c)
DRM_ERROR("failed to add ddc bus\n");
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 3acb2b0..d70e4a9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -323,6 +323,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
mgabo->gem.driver_private = NULL;
mgabo->bo.bdev = &mdev->ttm.bdev;
+ mgabo->bo.bdev->dev_mapping = dev->dev_mapping;
mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
@@ -353,6 +354,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
bo->pin_count++;
if (gpu_addr)
*gpu_addr = mgag200_bo_gpu_offset(bo);
+ return 0;
}
mgag200_ttm_placement(bo, pl_flag);
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
index 262c9f5..ce860de 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
@@ -90,6 +90,7 @@ nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
nv_subdev(priv)->unit = 0x00008000;
+ nv_subdev(priv)->intr = nouveau_falcon_intr;
nv_engine(priv)->cclass = &nvc0_bsp_cclass;
nv_engine(priv)->sclass = nvc0_bsp_sclass;
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
index c46882c..ba6aeca 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
@@ -90,6 +90,7 @@ nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
nv_subdev(priv)->unit = 0x00008000;
+ nv_subdev(priv)->intr = nouveau_falcon_intr;
nv_engine(priv)->cclass = &nve0_bsp_cclass;
nv_engine(priv)->sclass = nve0_bsp_sclass;
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
index 373dbcc..a19e7d7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -36,6 +36,8 @@ nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
if (data && data[0]) {
for (i = 0; i < size; i++)
nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
+ for (; i < 0x60; i++)
+ nv_wr32(priv, 0x61c440 + soff, (i << 8));
nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
} else
if (data) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
index dc57e24..7176393 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -41,6 +41,8 @@ nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
if (data && data[0]) {
for (i = 0; i < size; i++)
nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
+ for (; i < 0x60; i++)
+ nv_wr32(priv, 0x10ec00 + soff, (i << 8));
nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
} else
if (data) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index ab1e918..526b752 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -47,14 +47,8 @@ int
nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
{
struct nv50_disp_priv *priv = (void *)object->engine;
- struct nouveau_bios *bios = nouveau_bios(priv);
- const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
- const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR);
- const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
- struct dcb_output outp;
- u8 ver, hdr;
u32 data;
int ret = -EINVAL;
@@ -62,8 +56,6 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
return -EINVAL;
data = *(u32 *)args;
- if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp))
- return -ENODEV;
switch (mthd & ~0x3f) {
case NV50_DISP_SOR_PWR:
diff --git a/drivers/gpu/drm/nouveau/core/engine/falcon.c b/drivers/gpu/drm/nouveau/core/engine/falcon.c
index 3c7a31f..e03fc8e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/core/engine/falcon.c
@@ -23,6 +23,25 @@
#include <engine/falcon.h>
#include <subdev/timer.h>
+void
+nouveau_falcon_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_falcon *falcon = (void *)subdev;
+ u32 dispatch = nv_ro32(falcon, 0x01c);
+ u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
+
+ if (intr & 0x00000010) {
+ nv_debug(falcon, "ucode halted\n");
+ nv_wo32(falcon, 0x004, 0x00000010);
+ intr &= ~0x00000010;
+ }
+
+ if (intr) {
+ nv_error(falcon, "unhandled intr 0x%08x\n", intr);
+ nv_wo32(falcon, 0x004, intr);
+ }
+}
+
u32
_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr)
{
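
nouveau_falcon_intr() centralises the common falcon pattern: mask the pending bits at 0x008 with the dispatch mask at 0x01c, ack the HALT bit, and complain about anything left. Engines opt in by pointing their subdev intr hook at it, exactly as the bsp/ppp/vp constructors below do. A hedged sketch of that wiring for a hypothetical engine; the create call's arguments and the unit bit are illustrative:

/* Sketch only: "xyz" is a made-up engine, the boilerplate mirrors the
 * real ctor hunks in this series. */
static int
xyz_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	 struct nouveau_oclass *oclass, void *data, u32 size,
	 struct nouveau_object **pobject)
{
	struct xyz_priv *priv;
	int ret = nouveau_falcon_create(parent, engine, oclass, 0x000000,
					true, "PXYZ", "xyz", &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00000002;	     /* PMC interrupt bit */
	nv_subdev(priv)->intr = nouveau_falcon_intr; /* shared handler */
	return 0;
}
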
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 49ecbb8..c190043 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -265,8 +265,8 @@ nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int
nv31_mpeg_init(struct nouveau_object *object)
{
- struct nouveau_engine *engine = nv_engine(object->engine);
- struct nv31_mpeg_priv *priv = (void *)engine;
+ struct nouveau_engine *engine = nv_engine(object);
+ struct nv31_mpeg_priv *priv = (void *)object;
struct nouveau_fb *pfb = nouveau_fb(object);
int ret, i;
@@ -284,7 +284,10 @@ nv31_mpeg_init(struct nouveau_object *object)
/* PMPEG init */
nv_wr32(priv, 0x00b32c, 0x00000000);
nv_wr32(priv, 0x00b314, 0x00000100);
- nv_wr32(priv, 0x00b220, nv44_graph_class(priv) ? 0x00000044 : 0x00000031);
+ if (nv_device(priv)->chipset >= 0x40 && nv44_graph_class(priv))
+ nv_wr32(priv, 0x00b220, 0x00000044);
+ else
+ nv_wr32(priv, 0x00b220, 0x00000031);
nv_wr32(priv, 0x00b300, 0x02001ec1);
nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
index f7c581a..dd61960 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -61,6 +61,7 @@ nv40_mpeg_context_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
index 98072c1..73719aa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
@@ -90,6 +90,7 @@ nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
nv_subdev(priv)->unit = 0x00000002;
+ nv_subdev(priv)->intr = nouveau_falcon_intr;
nv_engine(priv)->cclass = &nvc0_ppp_cclass;
nv_engine(priv)->sclass = nvc0_ppp_sclass;
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
index 1879229..ac1f62a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
@@ -90,6 +90,7 @@ nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
nv_subdev(priv)->unit = 0x00020000;
+ nv_subdev(priv)->intr = nouveau_falcon_intr;
nv_engine(priv)->cclass = &nvc0_vp_cclass;
nv_engine(priv)->sclass = nvc0_vp_sclass;
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
index d28ecbf..d4c3108 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
@@ -90,6 +90,7 @@ nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
nv_subdev(priv)->unit = 0x00020000;
+ nv_subdev(priv)->intr = nouveau_falcon_intr;
nv_engine(priv)->cclass = &nve0_vp_cclass;
nv_engine(priv)->sclass = nve0_vp_sclass;
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/xtensa.c b/drivers/gpu/drm/nouveau/core/engine/xtensa.c
index 0639bc5..5f6ede7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/core/engine/xtensa.c
@@ -118,7 +118,13 @@ _nouveau_xtensa_init(struct nouveau_object *object)
return ret;
}
- ret = nouveau_gpuobj_new(object, NULL, fw->size, 0x1000, 0,
+ if (fw->size > 0x40000) {
+ nv_warn(xtensa, "firmware %s too large\n", name);
+ release_firmware(fw);
+ return -EINVAL;
+ }
+
+ ret = nouveau_gpuobj_new(object, NULL, 0x40000, 0x1000, 0,
&xtensa->gpu_fw);
if (ret) {
release_firmware(fw);
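
The xtensa change above pairs a size guard with a fixed-size allocation: the code window is 256 KiB, so an oversized firmware is rejected up front and the backing object is always allocated at the full window size. The bound check itself, as a runnable sketch:

#include <stdio.h>

#define FW_WINDOW 0x40000	/* 256 KiB, as in the hunk */

static int check_fw(unsigned long size)
{
	if (size > FW_WINDOW)
		return -1;	/* -EINVAL in the kernel */
	return 0;
}

int main(void)
{
	printf("%d %d\n", check_fw(0x40000), check_fw(0x40001));
	return 0;
}
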
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
index 1edec38..181aa7d 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
@@ -72,6 +72,8 @@ int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, u32, bool, const char *,
const char *, int, void **);
+void nouveau_falcon_intr(struct nouveau_subdev *subdev);
+
#define _nouveau_falcon_dtor _nouveau_engine_dtor
int _nouveau_falcon_init(struct nouveau_object *);
int _nouveau_falcon_fini(struct nouveau_object *, bool);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
index f2e87b1..fcf57fa 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
@@ -55,7 +55,7 @@ struct nouveau_vma {
struct nouveau_vm {
struct nouveau_vmmgr *vmm;
struct nouveau_mm mm;
- int refcount;
+ struct kref refcount;
struct list_head pgd_list;
atomic_t engref[NVDEV_SUBDEV_NR];
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index 6c974dd..db9d6dd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -81,7 +81,7 @@ void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv50_ram_put(struct nouveau_fb *, struct nouveau_mem **);
+void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *);
extern int nv50_fb_memtype[0x80];
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index af5aa7e..903baff 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -27,17 +27,10 @@
#include "priv.h"
void
-nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+__nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem *mem)
{
struct nouveau_mm_node *this;
- struct nouveau_mem *mem;
- mem = *pmem;
- *pmem = NULL;
- if (unlikely(mem == NULL))
- return;
-
- mutex_lock(&pfb->base.mutex);
while (!list_empty(&mem->regions)) {
this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
@@ -46,6 +39,19 @@ nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
}
nouveau_mm_free(&pfb->tags, &mem->tag);
+}
+
+void
+nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+{
+ struct nouveau_mem *mem = *pmem;
+
+ *pmem = NULL;
+ if (unlikely(mem == NULL))
+ return;
+
+ mutex_lock(&pfb->base.mutex);
+ __nv50_ram_put(pfb, mem);
mutex_unlock(&pfb->base.mutex);
kfree(mem);
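
This split follows the kernel's usual locked-helper convention: __nv50_ram_put() assumes the caller already holds the fb mutex, so nvc0_ram_put() in the next file can wrap both the compression-tag free and the region teardown in one critical section. A generic runnable sketch of the convention using pthreads (the kernel spells the lock-held variant with a "__" prefix; put_locked here avoids the reserved identifier):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int regions = 2;

static void put_locked(void)		/* caller holds "lock" */
{
	regions--;
}

static void put(void)			/* public entry: takes the lock */
{
	pthread_mutex_lock(&lock);
	put_locked();
	pthread_mutex_unlock(&lock);
}

static void put_with_tags(void)		/* extra work, same critical section */
{
	pthread_mutex_lock(&lock);
	/* free per-object tags here, as nvc0_ram_put() now does */
	put_locked();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	put();
	put_with_tags();
	printf("regions=%d\n", regions);
	return 0;
}
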
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index 9c3634a..cf97c4d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -33,11 +33,19 @@ void
nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
{
struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb);
+ struct nouveau_mem *mem = *pmem;
- if ((*pmem)->tag)
- ltcg->tags_free(ltcg, &(*pmem)->tag);
+ *pmem = NULL;
+ if (unlikely(mem == NULL))
+ return;
- nv50_ram_put(pfb, pmem);
+ mutex_lock(&pfb->base.mutex);
+ if (mem->tag)
+ ltcg->tags_free(ltcg, &mem->tag);
+ __nv50_ram_put(pfb, mem);
+ mutex_unlock(&pfb->base.mutex);
+
+ kfree(mem);
}
int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index bf489dc..c4c1d41 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -103,7 +103,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev)
int i;
intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050);
- if (nv_device(priv)->chipset >= 0x90)
+ if (nv_device(priv)->chipset > 0x92)
intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070);
hi = (intr0 & 0x0000ffff) | (intr1 << 16);
@@ -115,7 +115,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev)
}
nv_wr32(priv, 0xe054, intr0);
- if (nv_device(priv)->chipset >= 0x90)
+ if (nv_device(priv)->chipset > 0x92)
nv_wr32(priv, 0xe074, intr1);
}
@@ -146,7 +146,7 @@ nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_gpio_create(parent, engine, oclass,
- nv_device(parent)->chipset >= 0x90 ? 32 : 16,
+ nv_device(parent)->chipset > 0x92 ? 32 : 16,
&priv);
*pobject = nv_object(priv);
if (ret)
@@ -182,7 +182,7 @@ nv50_gpio_init(struct nouveau_object *object)
/* disable, and ack any pending gpio interrupts */
nv_wr32(priv, 0xe050, 0x00000000);
nv_wr32(priv, 0xe054, 0xffffffff);
- if (nv_device(priv)->chipset >= 0x90) {
+ if (nv_device(priv)->chipset > 0x92) {
nv_wr32(priv, 0xe070, 0x00000000);
nv_wr32(priv, 0xe074, 0xffffffff);
}
@@ -195,7 +195,7 @@ nv50_gpio_fini(struct nouveau_object *object, bool suspend)
{
struct nv50_gpio_priv *priv = (void *)object;
nv_wr32(priv, 0xe050, 0x00000000);
- if (nv_device(priv)->chipset >= 0x90)
+ if (nv_device(priv)->chipset > 0x92)
nv_wr32(priv, 0xe070, 0x00000000);
return nouveau_gpio_fini(&priv->base, suspend);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index 0cb322a..f25fc5f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -41,7 +41,7 @@ nv50_mc_intr[] = {
{ 0x04000000, NVDEV_ENGINE_DISP },
{ 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x80000000, NVDEV_ENGINE_SW },
- { 0x0000d101, NVDEV_SUBDEV_FB },
+ { 0x0002d101, NVDEV_SUBDEV_FB },
{},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 67fcb6c..ef3133e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -361,7 +361,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
INIT_LIST_HEAD(&vm->pgd_list);
vm->vmm = vmm;
- vm->refcount = 1;
+ kref_init(&vm->refcount);
vm->fpde = offset >> (vmm->pgt_bits + 12);
vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);
@@ -441,8 +441,9 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
}
static void
-nouveau_vm_del(struct nouveau_vm *vm)
+nouveau_vm_del(struct kref *kref)
{
+ struct nouveau_vm *vm = container_of(kref, typeof(*vm), refcount);
struct nouveau_vm_pgd *vpgd, *tmp;
list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
@@ -458,27 +459,19 @@ int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
struct nouveau_gpuobj *pgd)
{
- struct nouveau_vm *vm;
- int ret;
-
- vm = ref;
- if (vm) {
- ret = nouveau_vm_link(vm, pgd);
+ if (ref) {
+ int ret = nouveau_vm_link(ref, pgd);
if (ret)
return ret;
- vm->refcount++;
+ kref_get(&ref->refcount);
}
- vm = *ptr;
- *ptr = ref;
-
- if (vm) {
- nouveau_vm_unlink(vm, pgd);
-
- if (--vm->refcount == 0)
- nouveau_vm_del(vm);
+ if (*ptr) {
+ nouveau_vm_unlink(*ptr, pgd);
+ kref_put(&(*ptr)->refcount, nouveau_vm_del);
}
+ *ptr = ref;
return 0;
}
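
The vm/base.c hunk swaps an open-coded integer refcount for struct kref, which makes the count atomic and forces every dropper through the same release path; the destructor recovers the outer object with container_of(). A condensed kernel-style sketch of the pattern (not standalone-buildable; the types are stand-ins):

#include <linux/kref.h>
#include <linux/slab.h>

struct vm_like {
	struct kref refcount;
	/* ... */
};

static void vm_like_del(struct kref *kref)
{
	struct vm_like *vm = container_of(kref, struct vm_like, refcount);
	/* teardown; runs exactly once, when the last reference drops */
	kfree(vm);
}

static void vm_like_get(struct vm_like *vm)
{
	kref_get(&vm->refcount);
}

static void vm_like_put(struct vm_like *vm)
{
	kref_put(&vm->refcount, vm_like_del);
}
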
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4b1afb1..af20fba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -148,6 +148,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
if (unlikely(nvbo->gem))
DRM_ERROR("bo %p still attached to GEM object\n", bo);
+ WARN_ON(nvbo->pin_refcnt > 0);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
kfree(nvbo);
}
@@ -197,6 +198,17 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
size_t acc_size;
int ret;
int type = ttm_bo_type_device;
+ int lpg_shift = 12;
+ int max_size;
+
+ if (drm->client.base.vm)
+ lpg_shift = drm->client.base.vm->vmm->lpg_shift;
+ max_size = INT_MAX & ~((1 << lpg_shift) - 1);
+
+ if (size <= 0 || size > max_size) {
+ nv_warn(drm, "skipped size %x\n", (u32)size);
+ return -EINVAL;
+ }
if (sg)
type = ttm_bo_type_sg;
@@ -340,13 +352,15 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
- int ret;
+ int ret, ref;
ret = ttm_bo_reserve(bo, false, false, false, 0);
if (ret)
return ret;
- if (--nvbo->pin_refcnt)
+ ref = --nvbo->pin_refcnt;
+ WARN_ON_ONCE(ref < 0);
+ if (ref)
goto out;
nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
@@ -578,7 +592,7 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
int ret = RING_SPACE(chan, 2);
if (ret == 0) {
BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
- OUT_RING (chan, handle);
+ OUT_RING (chan, handle & 0x0000ffff);
FIRE_RING (chan);
}
return ret;
@@ -973,7 +987,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
- mutex_lock(&chan->cli->mutex);
+ mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
/* create temporary vmas for the transfer and attach them to the
* old nouveau_mem node, these will get cleaned up after ttm has
@@ -1014,7 +1028,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct ttm_mem_reg *, struct ttm_mem_reg *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
- { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
@@ -1034,7 +1048,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct nouveau_channel *chan;
u32 handle = (mthd->engine << 16) | mthd->oclass;
- if (mthd->init == nve0_bo_move_init)
+ if (mthd->engine)
chan = drm->cechan;
else
chan = drm->channel;
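
The new size check in nouveau_bo_new() caps allocations at the largest value below INT_MAX that is still a multiple of the VM's large-page size. A runnable sketch of the arithmetic; lpg_shift = 17 (128 KiB large pages) is an assumed example value:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	int lpg_shift = 17;	/* illustrative; taken from the VM in the driver */
	int max_size = INT_MAX & ~((1 << lpg_shift) - 1);

	printf("max_size = 0x%x (%d large pages)\n",
	       max_size, max_size >> lpg_shift);
	return 0;
}
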
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 708b2d1..907d20e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -138,7 +138,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
{
struct nouveau_framebuffer *nouveau_fb;
struct drm_gem_object *gem;
- int ret;
+ int ret = -ENOMEM;
gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
if (!gem)
@@ -146,15 +146,19 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
if (!nouveau_fb)
- return ERR_PTR(-ENOMEM);
+ goto err_unref;
ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
- if (ret) {
- drm_gem_object_unreference(gem);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto err;
return &nouveau_fb->base;
+
+err:
+ kfree(nouveau_fb);
+err_unref:
+ drm_gem_object_unreference(gem);
+ return ERR_PTR(ret);
}
static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
@@ -524,9 +528,12 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_page_flip_state *s;
struct nouveau_channel *chan = NULL;
struct nouveau_fence *fence;
- struct list_head res;
- struct ttm_validate_buffer res_val[2];
+ struct ttm_validate_buffer resv[2] = {
+ { .bo = &old_bo->bo },
+ { .bo = &new_bo->bo },
+ };
struct ww_acquire_ctx ticket;
+ LIST_HEAD(res);
int ret;
if (!drm->channel)
@@ -545,27 +552,19 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
chan = drm->channel;
spin_unlock(&old_bo->bo.bdev->fence_lock);
- mutex_lock(&chan->cli->mutex);
-
if (new_bo != old_bo) {
ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
- if (likely(!ret)) {
- res_val[0].bo = &old_bo->bo;
- res_val[1].bo = &new_bo->bo;
- INIT_LIST_HEAD(&res);
- list_add_tail(&res_val[0].head, &res);
- list_add_tail(&res_val[1].head, &res);
- ret = ttm_eu_reserve_buffers(&ticket, &res);
- if (ret)
- nouveau_bo_unpin(new_bo);
- }
- } else
- ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
+ if (ret)
+ goto fail_free;
- if (ret) {
- mutex_unlock(&chan->cli->mutex);
- goto fail_free;
+ list_add(&resv[1].head, &res);
}
+ list_add(&resv[0].head, &res);
+
+ mutex_lock(&chan->cli->mutex);
+ ret = ttm_eu_reserve_buffers(&ticket, &res);
+ if (ret)
+ goto fail_unpin;
/* Initialize a page flip struct */
*s = (struct nouveau_page_flip_state)
@@ -576,10 +575,8 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Emit a page flip */
if (nv_device(drm->device)->card_type >= NV_50) {
ret = nv50_display_flip_next(crtc, fb, chan, 0);
- if (ret) {
- mutex_unlock(&chan->cli->mutex);
+ if (ret)
goto fail_unreserve;
- }
}
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
@@ -590,22 +587,18 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Update the crtc struct and cleanup */
crtc->fb = fb;
- if (old_bo != new_bo) {
- ttm_eu_fence_buffer_objects(&ticket, &res, fence);
+ ttm_eu_fence_buffer_objects(&ticket, &res, fence);
+ if (old_bo != new_bo)
nouveau_bo_unpin(old_bo);
- } else {
- nouveau_bo_fence(new_bo, fence);
- ttm_bo_unreserve(&new_bo->bo);
- }
nouveau_fence_unref(&fence);
return 0;
fail_unreserve:
- if (old_bo != new_bo) {
- ttm_eu_backoff_reservation(&ticket, &res);
+ ttm_eu_backoff_reservation(&ticket, &res);
+fail_unpin:
+ mutex_unlock(&chan->cli->mutex);
+ if (old_bo != new_bo)
nouveau_bo_unpin(new_bo);
- } else
- ttm_bo_unreserve(&new_bo->bo);
fail_free:
kfree(s);
return ret;
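
After the rework, the flip path reserves the old and new BO through one ww-ticketed call instead of branching between per-BO reserve and list reserve. A condensed view of the resulting flow (TTM execbuf-util API, error paths trimmed; this just restates the hunk in straight-line form):

/* sketch: fragment, not a complete function */
struct ttm_validate_buffer resv[2] = {
	{ .bo = &old_bo->bo },
	{ .bo = &new_bo->bo },
};
struct ww_acquire_ctx ticket;
LIST_HEAD(res);

list_add(&resv[0].head, &res);
if (new_bo != old_bo)
	list_add(&resv[1].head, &res);

/* reserve everything atomically under one ww ticket ... */
ret = ttm_eu_reserve_buffers(&ticket, &res);
/* ... emit the flip, then fence and unreserve in one call */
ttm_eu_fence_buffer_objects(&ticket, &res, fence);
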
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 218a4b5..6197266 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -192,6 +192,18 @@ nouveau_accel_init(struct nouveau_drm *drm)
arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
arg1 = 1;
+ } else
+ if (device->chipset >= 0xa3 &&
+ device->chipset != 0xaa &&
+ device->chipset != 0xac) {
+ ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
+ NVDRM_CHAN + 1, NvDmaFB, NvDmaTT,
+ &drm->cechan);
+ if (ret)
+ NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
+
+ arg0 = NvDmaFB;
+ arg1 = NvDmaTT;
} else {
arg0 = NvDmaFB;
arg1 = NvDmaTT;
@@ -284,8 +296,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
return 0;
}
-static struct lock_class_key drm_client_lock_class_key;
-
static int
nouveau_drm_load(struct drm_device *dev, unsigned long flags)
{
@@ -297,7 +307,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
if (ret)
return ret;
- lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key);
dev->dev_private = drm;
drm->dev = dev;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 9352010..8f6d63d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -385,6 +385,7 @@ out_unlock:
mutex_unlock(&dev->struct_mutex);
if (chan)
nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma);
+ nouveau_bo_unmap(nvbo);
out_unpin:
nouveau_bo_unpin(nvbo);
out_unref:
@@ -397,7 +398,8 @@ void
nouveau_fbcon_output_poll_changed(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- drm_fb_helper_hotplug_event(&drm->fbcon->helper);
+ if (drm->fbcon)
+ drm_fb_helper_hotplug_event(&drm->fbcon->helper);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 1680d91..be31499 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -143,7 +143,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
int ret;
fence->channel = chan;
- fence->timeout = jiffies + (3 * DRM_HZ);
+ fence->timeout = jiffies + (15 * DRM_HZ);
fence->sequence = ++fctx->sequence;
ret = fctx->emit(fence);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index e72d09c..830cb7b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -50,12 +50,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
return;
nvbo->gem = NULL;
- /* Lockdep hates you for doing reserve with gem object lock held */
- if (WARN_ON_ONCE(nvbo->pin_refcnt)) {
- nvbo->pin_refcnt = 1;
- nouveau_bo_unpin(nvbo);
- }
-
if (gem->import_attach)
drm_prime_gem_destroy(gem, nvbo->bo.sg);
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 8e47a9b..22aa996 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -76,7 +76,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
struct nouveau_object *object;
u32 start = mem->start * PAGE_SIZE;
- u32 limit = mem->start + mem->size - 1;
+ u32 limit = start + mem->size - 1;
int ret = 0;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
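
The one-liner above fixes a unit mix-up: mem->start is a page index while the DMA object's limit must be a byte address, so the old code produced a window far too small. A runnable sketch with example values:

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int mem_start = 256;	/* page index, example value */
	unsigned int mem_size = 0x1000;	/* bytes */

	unsigned int start = mem_start * PAGE_SIZE;
	unsigned int bad_limit = mem_start + mem_size - 1;	/* old code */
	unsigned int good_limit = start + mem_size - 1;		/* fixed */

	printf("start=%#x bad=%#x good=%#x\n", start, bad_limit, good_limit);
	return 0;
}
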
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 54dc635..8b40a36 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -355,6 +355,7 @@ struct nv50_oimm {
struct nv50_head {
struct nouveau_crtc base;
+ struct nouveau_bo *image;
struct nv50_curs curs;
struct nv50_sync sync;
struct nv50_ovly ovly;
@@ -517,9 +518,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
{
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_head *head = nv50_head(crtc);
struct nv50_sync *sync = nv50_sync(crtc);
- int head = nv_crtc->index, ret;
u32 *push;
+ int ret;
swap_interval <<= 4;
if (swap_interval == 0)
@@ -537,7 +539,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return ret;
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
- OUT_RING (chan, NvEvoSema0 + head);
+ OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
OUT_RING (chan, sync->addr ^ 0x10);
BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
OUT_RING (chan, sync->data + 1);
@@ -546,7 +548,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
OUT_RING (chan, sync->data);
} else
if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
- u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
+ u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
ret = RING_SPACE(chan, 12);
if (ret)
return ret;
@@ -565,7 +567,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
} else
if (chan) {
- u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
+ u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
ret = RING_SPACE(chan, 10);
if (ret)
return ret;
@@ -630,6 +632,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
evo_mthd(push, 0x0080, 1);
evo_data(push, 0x00000000);
evo_kick(push, sync);
+
+ nouveau_bo_ref(nv_fb->nvbo, &head->image);
return 0;
}
@@ -1038,18 +1042,17 @@ static int
nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
{
struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+ struct nv50_head *head = nv50_head(crtc);
int ret;
ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- if (old_fb) {
- nvfb = nouveau_framebuffer(old_fb);
- nouveau_bo_unpin(nvfb->nvbo);
+ if (ret == 0) {
+ if (head->image)
+ nouveau_bo_unpin(head->image);
+ nouveau_bo_ref(nvfb->nvbo, &head->image);
}
- return 0;
+ return ret;
}
static int
@@ -1198,6 +1201,15 @@ nv50_crtc_lut_load(struct drm_crtc *crtc)
}
}
+static void
+nv50_crtc_disable(struct drm_crtc *crtc)
+{
+ struct nv50_head *head = nv50_head(crtc);
+ if (head->image)
+ nouveau_bo_unpin(head->image);
+ nouveau_bo_ref(NULL, &head->image);
+}
+
static int
nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height)
@@ -1271,18 +1283,29 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nv50_head *head = nv50_head(crtc);
+
nv50_dmac_destroy(disp->core, &head->ovly.base);
nv50_pioc_destroy(disp->core, &head->oimm.base);
nv50_dmac_destroy(disp->core, &head->sync.base);
nv50_pioc_destroy(disp->core, &head->curs.base);
+
+ /*XXX: this shouldn't be necessary, but the core doesn't call
+ * disconnect() during the cleanup paths
+ */
+ if (head->image)
+ nouveau_bo_unpin(head->image);
+ nouveau_bo_ref(NULL, &head->image);
+
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
if (nv_crtc->cursor.nvbo)
nouveau_bo_unpin(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+
nouveau_bo_unmap(nv_crtc->lut.nvbo);
if (nv_crtc->lut.nvbo)
nouveau_bo_unpin(nv_crtc->lut.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+
drm_crtc_cleanup(crtc);
kfree(crtc);
}
@@ -1296,6 +1319,7 @@ static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
.mode_set_base = nv50_crtc_mode_set_base,
.mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
.load_lut = nv50_crtc_lut_load,
+ .disable = nv50_crtc_disable,
};
static const struct drm_crtc_funcs nv50_crtc_func = {
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index f9701e5..0ee3638 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -39,6 +39,8 @@ nv50_fence_context_new(struct nouveau_channel *chan)
struct nv10_fence_chan *fctx;
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
struct nouveau_object *object;
+ u32 start = mem->start * PAGE_SIZE;
+ u32 limit = start + mem->size - 1;
int ret, i;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -51,26 +53,28 @@ nv50_fence_context_new(struct nouveau_channel *chan)
fctx->base.sync = nv17_fence_sync;
ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvSema, 0x0002,
+ NvSema, 0x003d,
&(struct nv_dma_class) {
.flags = NV_DMA_TARGET_VRAM |
NV_DMA_ACCESS_RDWR,
- .start = mem->start * PAGE_SIZE,
- .limit = mem->size - 1,
+ .start = start,
+ .limit = limit,
}, sizeof(struct nv_dma_class),
&object);
/* dma objects for display sync channel semaphore blocks */
for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+ u32 start = bo->bo.mem.start * PAGE_SIZE;
+ u32 limit = start + bo->bo.mem.size - 1;
ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
NvEvoSema0 + i, 0x003d,
&(struct nv_dma_class) {
.flags = NV_DMA_TARGET_VRAM |
NV_DMA_ACCESS_RDWR,
- .start = bo->bo.offset,
- .limit = bo->bo.offset + 0xfff,
+ .start = start,
+ .limit = limit,
}, sizeof(struct nv_dma_class),
&object);
}
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 93c2f2c..eb89653 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -179,9 +179,10 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea
uint32_t type, bool interruptible)
{
struct qxl_command cmd;
+ struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
cmd.type = type;
- cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+ cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}
@@ -191,9 +192,10 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas
uint32_t type, bool interruptible)
{
struct qxl_command cmd;
+ struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
cmd.type = type;
- cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+ cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}
@@ -214,7 +216,6 @@ int qxl_garbage_collect(struct qxl_device *qdev)
struct qxl_release *release;
uint64_t id, next_id;
int i = 0;
- int ret;
union qxl_release_info *info;
while (qxl_ring_pop(qdev->release_ring, &id)) {
@@ -224,17 +225,10 @@ int qxl_garbage_collect(struct qxl_device *qdev)
if (release == NULL)
break;
- ret = qxl_release_reserve(qdev, release, false);
- if (ret) {
- qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id);
- DRM_ERROR("failed to reserve release %lld\n", id);
- }
-
info = qxl_release_map(qdev, release);
next_id = info->next;
qxl_release_unmap(qdev, release, info);
- qxl_release_unreserve(qdev, release);
QXL_INFO(qdev, "popped %lld, next %lld\n", id,
next_id);
@@ -259,27 +253,29 @@ int qxl_garbage_collect(struct qxl_device *qdev)
return i;
}
-int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+int qxl_alloc_bo_reserved(struct qxl_device *qdev,
+ struct qxl_release *release,
+ unsigned long size,
struct qxl_bo **_bo)
{
struct qxl_bo *bo;
int ret;
ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
- QXL_GEM_DOMAIN_VRAM, NULL, &bo);
+ false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
if (ret) {
DRM_ERROR("failed to allocate VRAM BO\n");
return ret;
}
- ret = qxl_bo_reserve(bo, false);
- if (unlikely(ret != 0))
+ ret = qxl_release_list_add(release, bo);
+ if (ret)
goto out_unref;
*_bo = bo;
return 0;
out_unref:
qxl_bo_unref(&bo);
- return 0;
+ return ret;
}
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
@@ -503,6 +499,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
if (ret)
return ret;
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ return ret;
+
cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_SURFACE_CMD_CREATE;
cmd->u.surface_create.format = surf->surf.format;
@@ -524,14 +524,11 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
surf->surf_create = release;
- /* no need to add a release to the fence for this bo,
+ /* no need to add a release to the fence for this surface bo,
since it is only released when we ask to destroy the surface
and it would never signal otherwise */
- qxl_fence_releaseable(qdev, release);
-
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
-
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
surf->hw_surf_alloc = true;
spin_lock(&qdev->surf_id_idr_lock);
@@ -573,12 +570,9 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
cmd->surface_id = id;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_fence_releaseable(qdev, release);
-
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
- qxl_release_unreserve(qdev, release);
-
+ qxl_release_fence_buffer_objects(release);
return 0;
}
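
All the qxl hunks that follow converge on one release lifecycle: allocate the release, attach every BO to its list, reserve the whole list once, fill and push the command, then fence and unreserve in a single call. A condensed kernel-context sketch of that order (error handling trimmed; type, size and bo are placeholders):

/* sketch: fragment showing call order only */
ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), type, &release, NULL);

ret = qxl_alloc_bo_reserved(qdev, release, size, &bo); /* adds bo to list */
ret = qxl_release_reserve_list(release, true);

cmd = qxl_release_map(qdev, release);
/* ... fill *cmd ... */
qxl_release_unmap(qdev, release, &cmd->release_info);

qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
qxl_release_fence_buffer_objects(release);	/* replaces unreserve */
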
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index f76f5dd..835caba 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -179,7 +179,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc)
kfree(qxl_crtc);
}
-static void
+static int
qxl_hide_cursor(struct qxl_device *qdev)
{
struct qxl_release *release;
@@ -188,14 +188,22 @@ qxl_hide_cursor(struct qxl_device *qdev)
ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
&release, NULL);
+ if (ret)
+ return ret;
+
+ ret = qxl_release_reserve_list(release, true);
+ if (ret) {
+ qxl_release_free(qdev, release);
+ return ret;
+ }
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_HIDE;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_fence_releaseable(qdev, release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+ return 0;
}
static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
@@ -216,10 +224,8 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
int size = 64*64*4;
int ret = 0;
- if (!handle) {
- qxl_hide_cursor(qdev);
- return 0;
- }
+ if (!handle)
+ return qxl_hide_cursor(qdev);
obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
if (!obj) {
@@ -234,8 +240,9 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
goto out_unref;
ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
+ qxl_bo_unreserve(user_bo);
if (ret)
- goto out_unreserve;
+ goto out_unref;
ret = qxl_bo_kmap(user_bo, &user_ptr);
if (ret)
@@ -246,14 +253,20 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
&release, NULL);
if (ret)
goto out_kunmap;
- ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size,
- &cursor_bo);
+
+ ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_cursor) + size,
+ &cursor_bo);
if (ret)
goto out_free_release;
- ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
+
+ ret = qxl_release_reserve_list(release, false);
if (ret)
goto out_free_bo;
+ ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
+ if (ret)
+ goto out_backoff;
+
cursor->header.unique = 0;
cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
cursor->header.width = 64;
@@ -269,11 +282,7 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
qxl_bo_kunmap(cursor_bo);
- /* finish with the userspace bo */
qxl_bo_kunmap(user_bo);
- qxl_bo_unpin(user_bo);
- qxl_bo_unreserve(user_bo);
- drm_gem_object_unreference_unlocked(obj);
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
@@ -281,30 +290,35 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
cmd->u.set.position.y = qcrtc->cur_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
- qxl_release_add_res(qdev, release, cursor_bo);
cmd->u.set.visible = 1;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_fence_releaseable(qdev, release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
+ /* finish with the userspace bo */
+ ret = qxl_bo_reserve(user_bo, false);
+ if (!ret) {
+ qxl_bo_unpin(user_bo);
+ qxl_bo_unreserve(user_bo);
+ }
+ drm_gem_object_unreference_unlocked(obj);
- qxl_bo_unreserve(cursor_bo);
qxl_bo_unref(&cursor_bo);
return ret;
+
+out_backoff:
+ qxl_release_backoff_reserve_list(release);
out_free_bo:
qxl_bo_unref(&cursor_bo);
out_free_release:
- qxl_release_unreserve(qdev, release);
qxl_release_free(qdev, release);
out_kunmap:
qxl_bo_kunmap(user_bo);
out_unpin:
qxl_bo_unpin(user_bo);
-out_unreserve:
- qxl_bo_unreserve(user_bo);
out_unref:
drm_gem_object_unreference_unlocked(obj);
return ret;
@@ -322,6 +336,14 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
&release, NULL);
+ if (ret)
+ return ret;
+
+ ret = qxl_release_reserve_list(release, true);
+ if (ret) {
+ qxl_release_free(qdev, release);
+ return ret;
+ }
qcrtc->cur_x = x;
qcrtc->cur_y = y;
@@ -332,9 +354,9 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
cmd->u.position.y = qcrtc->cur_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_fence_releaseable(qdev, release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 3c8c3db..56e1d63 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -23,25 +23,29 @@
#include "qxl_drv.h"
#include "qxl_object.h"
+static int alloc_clips(struct qxl_device *qdev,
+ struct qxl_release *release,
+ unsigned num_clips,
+ struct qxl_bo **clips_bo)
+{
+ int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips;
+
+ return qxl_alloc_bo_reserved(qdev, release, size, clips_bo);
+}
+
/* returns a pointer to the already allocated qxl_rect array inside
* the qxl_clip_rects. This is *not* the same as the memory allocated
* on the device; it is offset into qxl_clip_rects.chunk.data */
static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
struct qxl_drawable *drawable,
unsigned num_clips,
- struct qxl_bo **clips_bo,
- struct qxl_release *release)
+ struct qxl_bo *clips_bo)
{
struct qxl_clip_rects *dev_clips;
int ret;
- int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips;
- ret = qxl_alloc_bo_reserved(qdev, size, clips_bo);
- if (ret)
- return NULL;
- ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips);
+ ret = qxl_bo_kmap(clips_bo, (void **)&dev_clips);
if (ret) {
- qxl_bo_unref(clips_bo);
return NULL;
}
dev_clips->num_rects = num_clips;
@@ -52,20 +56,34 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
}
static int
+alloc_drawable(struct qxl_device *qdev, struct qxl_release **release)
+{
+ int ret;
+ ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
+ QXL_RELEASE_DRAWABLE, release,
+ NULL);
+ return ret;
+}
+
+static void
+free_drawable(struct qxl_device *qdev, struct qxl_release *release)
+{
+ qxl_release_free(qdev, release);
+}
+
+/* release needs to be reserved at this point */
+static int
make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
const struct qxl_rect *rect,
- struct qxl_release **release)
+ struct qxl_release *release)
{
struct qxl_drawable *drawable;
- int i, ret;
+ int i;
- ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable),
- QXL_RELEASE_DRAWABLE, release,
- NULL);
- if (ret)
- return ret;
+ drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+ if (!drawable)
+ return -ENOMEM;
- drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release);
drawable->type = type;
drawable->surface_id = surface; /* Only primary for now */
@@ -91,14 +109,23 @@ make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
drawable->bbox = *rect;
drawable->mm_time = qdev->rom->mm_clock;
- qxl_release_unmap(qdev, *release, &drawable->release_info);
+ qxl_release_unmap(qdev, release, &drawable->release_info);
return 0;
}
-static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
+static int alloc_palette_object(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_bo **palette_bo)
+{
+ return qxl_alloc_bo_reserved(qdev, release,
+ sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
+ palette_bo);
+}
+
+static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
+ struct qxl_release *release,
const struct qxl_fb_image *qxl_fb_image)
{
- struct qxl_device *qdev = qxl_fb_image->qdev;
const struct fb_image *fb_image = &qxl_fb_image->fb_image;
uint32_t visual = qxl_fb_image->visual;
const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette;
@@ -108,12 +135,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
static uint64_t unique; /* we make no attempt to actually set this
* correctly globally, since that would require
* tracking all of our palettes. */
-
- ret = qxl_alloc_bo_reserved(qdev,
- sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
- palette_bo);
-
- ret = qxl_bo_kmap(*palette_bo, (void **)&pal);
+ ret = qxl_bo_kmap(palette_bo, (void **)&pal);
pal->num_ents = 2;
pal->unique = unique++;
if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
@@ -126,7 +148,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
}
pal->ents[0] = bgcolor;
pal->ents[1] = fgcolor;
- qxl_bo_kunmap(*palette_bo);
+ qxl_bo_kunmap(palette_bo);
return 0;
}
@@ -144,44 +166,63 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
const char *src = fb_image->data;
int depth = fb_image->depth;
struct qxl_release *release;
- struct qxl_bo *image_bo;
struct qxl_image *image;
int ret;
-
+ struct qxl_drm_image *dimage;
+ struct qxl_bo *palette_bo = NULL;
if (stride == 0)
stride = depth * width / 8;
+ ret = alloc_drawable(qdev, &release);
+ if (ret)
+ return;
+
+ ret = qxl_image_alloc_objects(qdev, release,
+ &dimage,
+ height, stride);
+ if (ret)
+ goto out_free_drawable;
+
+ if (depth == 1) {
+ ret = alloc_palette_object(qdev, release, &palette_bo);
+ if (ret)
+ goto out_free_image;
+ }
+
+ /* do a reservation run over all the objects we just allocated */
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ goto out_free_palette;
+
rect.left = x;
rect.right = x + width;
rect.top = y;
rect.bottom = y + height;
- ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release);
- if (ret)
- return;
+ ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, release);
+ if (ret) {
+ qxl_release_backoff_reserve_list(release);
+ goto out_free_palette;
+ }
- ret = qxl_image_create(qdev, release, &image_bo,
- (const uint8_t *)src, 0, 0,
- width, height, depth, stride);
+ ret = qxl_image_init(qdev, release, dimage,
+ (const uint8_t *)src, 0, 0,
+ width, height, depth, stride);
if (ret) {
- qxl_release_unreserve(qdev, release);
+ qxl_release_backoff_reserve_list(release);
qxl_release_free(qdev, release);
return;
}
if (depth == 1) {
- struct qxl_bo *palette_bo;
void *ptr;
- ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image);
- qxl_release_add_res(qdev, release, palette_bo);
+ ret = qxl_palette_create_1bit(palette_bo, release, qxl_fb_image);
- ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
+ ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0);
image = ptr;
image->u.bitmap.palette =
qxl_bo_physical_address(qdev, palette_bo, 0);
- qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
- qxl_bo_unreserve(palette_bo);
- qxl_bo_unref(&palette_bo);
+ qxl_bo_kunmap_atomic_page(qdev, dimage->bo, ptr);
}
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
@@ -199,16 +240,20 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
drawable->u.copy.mask.bitmap = 0;
drawable->u.copy.src_bitmap =
- qxl_bo_physical_address(qdev, image_bo, 0);
+ qxl_bo_physical_address(qdev, dimage->bo, 0);
qxl_release_unmap(qdev, release, &drawable->release_info);
- qxl_release_add_res(qdev, release, image_bo);
- qxl_bo_unreserve(image_bo);
- qxl_bo_unref(&image_bo);
-
- qxl_fence_releaseable(qdev, release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
+out_free_palette:
+ if (palette_bo)
+ qxl_bo_unref(&palette_bo);
+out_free_image:
+ qxl_image_free_objects(qdev, dimage);
+out_free_drawable:
+ if (ret)
+ free_drawable(qdev, release);
}
/* push a draw command using the given clipping rectangles as
@@ -243,10 +288,14 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
int depth = qxl_fb->base.bits_per_pixel;
uint8_t *surface_base;
struct qxl_release *release;
- struct qxl_bo *image_bo;
struct qxl_bo *clips_bo;
+ struct qxl_drm_image *dimage;
int ret;
+ ret = alloc_drawable(qdev, &release);
+ if (ret)
+ return;
+
left = clips->x1;
right = clips->x2;
top = clips->y1;
@@ -263,36 +312,52 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
width = right - left;
height = bottom - top;
+
+ ret = alloc_clips(qdev, release, num_clips, &clips_bo);
+ if (ret)
+ goto out_free_drawable;
+
+ ret = qxl_image_alloc_objects(qdev, release,
+ &dimage,
+ height, stride);
+ if (ret)
+ goto out_free_clips;
+
+ /* do a reservation run over all the objects we just allocated */
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ goto out_free_image;
+
drawable_rect.left = left;
drawable_rect.right = right;
drawable_rect.top = top;
drawable_rect.bottom = bottom;
+
ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
- &release);
+ release);
if (ret)
- return;
+ goto out_release_backoff;
ret = qxl_bo_kmap(bo, (void **)&surface_base);
if (ret)
- goto out_unref;
+ goto out_release_backoff;
- ret = qxl_image_create(qdev, release, &image_bo, surface_base,
- left, top, width, height, depth, stride);
+
+ ret = qxl_image_init(qdev, release, dimage, surface_base,
+ left, top, width, height, depth, stride);
qxl_bo_kunmap(bo);
if (ret)
- goto out_unref;
+ goto out_release_backoff;
+
+ rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo);
+ if (!rects)
+ goto out_release_backoff;
- rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release);
- if (!rects) {
- qxl_bo_unref(&image_bo);
- goto out_unref;
- }
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
drawable->clip.data = qxl_bo_physical_address(qdev,
clips_bo, 0);
- qxl_release_add_res(qdev, release, clips_bo);
drawable->u.copy.src_area.top = 0;
drawable->u.copy.src_area.bottom = height;
@@ -306,11 +371,9 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
drawable->u.copy.mask.pos.y = 0;
drawable->u.copy.mask.bitmap = 0;
- drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0);
+ drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0);
qxl_release_unmap(qdev, release, &drawable->release_info);
- qxl_release_add_res(qdev, release, image_bo);
- qxl_bo_unreserve(image_bo);
- qxl_bo_unref(&image_bo);
+
clips_ptr = clips;
for (i = 0; i < num_clips; i++, clips_ptr += inc) {
rects[i].left = clips_ptr->x1;
@@ -319,17 +382,22 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
rects[i].bottom = clips_ptr->y2;
}
qxl_bo_kunmap(clips_bo);
- qxl_bo_unreserve(clips_bo);
- qxl_bo_unref(&clips_bo);
- qxl_fence_releaseable(qdev, release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
- qxl_release_unreserve(qdev, release);
- return;
+ qxl_release_fence_buffer_objects(release);
+
+out_release_backoff:
+ if (ret)
+ qxl_release_backoff_reserve_list(release);
+out_free_image:
+ qxl_image_free_objects(qdev, dimage);
+out_free_clips:
+ qxl_bo_unref(&clips_bo);
+out_free_drawable:
+ /* only free drawable on error */
+ if (ret)
+ free_drawable(qdev, release);
-out_unref:
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
}
void qxl_draw_copyarea(struct qxl_device *qdev,
@@ -342,22 +410,36 @@ void qxl_draw_copyarea(struct qxl_device *qdev,
struct qxl_release *release;
int ret;
+ ret = alloc_drawable(qdev, &release);
+ if (ret)
+ return;
+
+ /* do a reservation run over all the objects we just allocated */
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ goto out_free_release;
+
rect.left = dx;
rect.top = dy;
rect.right = dx + width;
rect.bottom = dy + height;
- ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release);
- if (ret)
- return;
+ ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, release);
+ if (ret) {
+ qxl_release_backoff_reserve_list(release);
+ goto out_free_release;
+ }
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->u.copy_bits.src_pos.x = sx;
drawable->u.copy_bits.src_pos.y = sy;
-
qxl_release_unmap(qdev, release, &drawable->release_info);
- qxl_fence_releaseable(qdev, release);
+
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
+out_free_release:
+ if (ret)
+ free_drawable(qdev, release);
}
void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
@@ -370,10 +452,21 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
struct qxl_release *release;
int ret;
- ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release);
+ ret = alloc_drawable(qdev, &release);
if (ret)
return;
+ /* do a reservation run over all the objects we just allocated */
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ goto out_free_release;
+
+ ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, release);
+ if (ret) {
+ qxl_release_backoff_reserve_list(release);
+ goto out_free_release;
+ }
+
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID;
drawable->u.fill.brush.u.color = color;
@@ -384,7 +477,11 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
drawable->u.fill.mask.bitmap = 0;
qxl_release_unmap(qdev, release, &drawable->release_info);
- qxl_fence_releaseable(qdev, release);
+
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
+out_free_release:
+ if (ret)
+ free_drawable(qdev, release);
}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index aacb791..7e96f4f 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -42,6 +42,9 @@
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
+/* just for ttm_validate_buffer */
+#include <ttm/ttm_execbuf_util.h>
+
#include <drm/qxl_drm.h>
#include "qxl_dev.h"
@@ -118,9 +121,9 @@ struct qxl_bo {
uint32_t surface_id;
struct qxl_fence fence; /* per bo fence - list of releases */
struct qxl_release *surf_create;
- atomic_t reserve_count;
};
#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
+#define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
struct qxl_gem {
struct mutex mutex;
@@ -128,12 +131,7 @@ struct qxl_gem {
};
struct qxl_bo_list {
- struct list_head lhead;
- struct qxl_bo *bo;
-};
-
-struct qxl_reloc_list {
- struct list_head bos;
+ struct ttm_validate_buffer tv;
};
struct qxl_crtc {
@@ -195,10 +193,20 @@ enum {
struct qxl_release {
int id;
int type;
- int bo_count;
uint32_t release_offset;
uint32_t surface_release_id;
- struct qxl_bo *bos[QXL_MAX_RES];
+ struct ww_acquire_ctx ticket;
+ struct list_head bos;
+};
+
+struct qxl_drm_chunk {
+ struct list_head head;
+ struct qxl_bo *bo;
+};
+
+struct qxl_drm_image {
+ struct qxl_bo *bo;
+ struct list_head chunk_list;
};
struct qxl_fb_image {
@@ -314,6 +322,7 @@ struct qxl_device {
struct workqueue_struct *gc_queue;
struct work_struct gc_work;
+ struct work_struct fb_work;
};
/* forward declaration for QXL_INFO_IO */
@@ -433,12 +442,19 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
/* qxl image */
-int qxl_image_create(struct qxl_device *qdev,
- struct qxl_release *release,
- struct qxl_bo **image_bo,
- const uint8_t *data,
- int x, int y, int width, int height,
- int depth, int stride);
+int qxl_image_init(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_drm_image *dimage,
+ const uint8_t *data,
+ int x, int y, int width, int height,
+ int depth, int stride);
+int
+qxl_image_alloc_objects(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_drm_image **image_ptr,
+ int height, int stride);
+void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage);
+
void qxl_update_screen(struct qxl_device *qxl);
/* qxl io operations (qxl_cmd.c) */
@@ -459,20 +475,15 @@ int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible
void qxl_io_flush_release(struct qxl_device *qdev);
void qxl_io_flush_surfaces(struct qxl_device *qdev);
-int qxl_release_reserve(struct qxl_device *qdev,
- struct qxl_release *release, bool no_wait);
-void qxl_release_unreserve(struct qxl_device *qdev,
- struct qxl_release *release);
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
struct qxl_release *release);
void qxl_release_unmap(struct qxl_device *qdev,
struct qxl_release *release,
union qxl_release_info *info);
-/*
- * qxl_bo_add_resource.
- *
- */
-void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource);
+int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo);
+int qxl_release_reserve_list(struct qxl_release *release, bool no_intr);
+void qxl_release_backoff_reserve_list(struct qxl_release *release);
+void qxl_release_fence_buffer_objects(struct qxl_release *release);
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
enum qxl_surface_cmd_type surface_cmd_type,
@@ -481,15 +492,16 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
int type, struct qxl_release **release,
struct qxl_bo **rbo);
-int qxl_fence_releaseable(struct qxl_device *qdev,
- struct qxl_release *release);
+
int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
uint32_t type, bool interruptible);
int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
uint32_t type, bool interruptible);
-int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+int qxl_alloc_bo_reserved(struct qxl_device *qdev,
+ struct qxl_release *release,
+ unsigned long size,
struct qxl_bo **_bo);
/* qxl drawing commands */
@@ -510,15 +522,9 @@ void qxl_draw_copyarea(struct qxl_device *qdev,
u32 sx, u32 sy,
u32 dx, u32 dy);
-uint64_t
-qxl_release_alloc(struct qxl_device *qdev, int type,
- struct qxl_release **ret);
-
void qxl_release_free(struct qxl_device *qdev,
struct qxl_release *release);
-void qxl_release_add_res(struct qxl_device *qdev,
- struct qxl_release *release,
- struct qxl_bo *bo);
+
/* used by qxl_debugfs_release */
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
uint64_t id);
@@ -561,7 +567,7 @@ void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freein
int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
/* qxl_fence.c */
-int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id);
+void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id);
int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
void qxl_fence_fini(struct qxl_fence *qfence);
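
The new to_qxl_bo() macro above is plain container_of(): given the ttm_buffer_object embedded in a qxl_bo, step back to the wrapper. A runnable userspace illustration with stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tbo { int dummy; };			/* stands in for ttm_buffer_object */
struct qbo { int id; struct tbo tbo; };		/* stands in for qxl_bo */

int main(void)
{
	struct qbo q = { .id = 42 };
	struct tbo *t = &q.tbo;			/* what TTM hands back */
	struct qbo *back = container_of(t, struct qbo, tbo);

	printf("id=%d\n", back->id);
	return 0;
}
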
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 76f39d8..88722f2 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -37,12 +37,29 @@
#define QXL_DIRTY_DELAY (HZ / 30)
+#define QXL_FB_OP_FILLRECT 1
+#define QXL_FB_OP_COPYAREA 2
+#define QXL_FB_OP_IMAGEBLIT 3
+
+struct qxl_fb_op {
+ struct list_head head;
+ int op_type;
+ union {
+ struct fb_fillrect fr;
+ struct fb_copyarea ca;
+ struct fb_image ib;
+ } op;
+ void *img_data;
+};
+
struct qxl_fbdev {
struct drm_fb_helper helper;
struct qxl_framebuffer qfb;
struct list_head fbdev_list;
struct qxl_device *qdev;
+ spinlock_t delayed_ops_lock;
+ struct list_head delayed_ops;
void *shadow;
int size;
@@ -164,8 +181,69 @@ static struct fb_deferred_io qxl_defio = {
.deferred_io = qxl_deferred_io,
};
-static void qxl_fb_fillrect(struct fb_info *info,
- const struct fb_fillrect *fb_rect)
+static void qxl_fb_delayed_fillrect(struct qxl_fbdev *qfbdev,
+ const struct fb_fillrect *fb_rect)
+{
+ struct qxl_fb_op *op;
+ unsigned long flags;
+
+ op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
+ if (!op)
+ return;
+
+ op->op.fr = *fb_rect;
+ op->img_data = NULL;
+ op->op_type = QXL_FB_OP_FILLRECT;
+
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_add_tail(&op->head, &qfbdev->delayed_ops);
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+}
+
+static void qxl_fb_delayed_copyarea(struct qxl_fbdev *qfbdev,
+ const struct fb_copyarea *fb_copy)
+{
+ struct qxl_fb_op *op;
+ unsigned long flags;
+
+ op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
+ if (!op)
+ return;
+
+ op->op.ca = *fb_copy;
+ op->img_data = NULL;
+ op->op_type = QXL_FB_OP_COPYAREA;
+
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_add_tail(&op->head, &qfbdev->delayed_ops);
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+}
+
+static void qxl_fb_delayed_imageblit(struct qxl_fbdev *qfbdev,
+ const struct fb_image *fb_image)
+{
+ struct qxl_fb_op *op;
+ unsigned long flags;
+ uint32_t size = fb_image->width * fb_image->height * (fb_image->depth >= 8 ? fb_image->depth / 8 : 1);
+
+ op = kmalloc(sizeof(struct qxl_fb_op) + size, GFP_ATOMIC | __GFP_NOWARN);
+ if (!op)
+ return;
+
+ op->op.ib = *fb_image;
+ op->img_data = (void *)(op + 1);
+ op->op_type = QXL_FB_OP_IMAGEBLIT;
+
+ memcpy(op->img_data, fb_image->data, size);
+
+ op->op.ib.data = op->img_data;
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_add_tail(&op->head, &qfbdev->delayed_ops);
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+}
+
+static void qxl_fb_fillrect_internal(struct fb_info *info,
+ const struct fb_fillrect *fb_rect)
{
struct qxl_fbdev *qfbdev = info->par;
struct qxl_device *qdev = qfbdev->qdev;
@@ -203,17 +281,28 @@ static void qxl_fb_fillrect(struct fb_info *info,
qxl_draw_fill_rec.rect = rect;
qxl_draw_fill_rec.color = color;
qxl_draw_fill_rec.rop = rop;
+
+ qxl_draw_fill(&qxl_draw_fill_rec);
+}
+
+static void qxl_fb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *fb_rect)
+{
+ struct qxl_fbdev *qfbdev = info->par;
+ struct qxl_device *qdev = qfbdev->qdev;
+
if (!drm_can_sleep()) {
- qxl_io_log(qdev,
- "%s: TODO use RCU, mysterious locks with spin_lock\n",
- __func__);
+ qxl_fb_delayed_fillrect(qfbdev, fb_rect);
+ schedule_work(&qdev->fb_work);
return;
}
- qxl_draw_fill(&qxl_draw_fill_rec);
+ /* make sure any previous work is done */
+ flush_work(&qdev->fb_work);
+ qxl_fb_fillrect_internal(info, fb_rect);
}
-static void qxl_fb_copyarea(struct fb_info *info,
- const struct fb_copyarea *region)
+static void qxl_fb_copyarea_internal(struct fb_info *info,
+ const struct fb_copyarea *region)
{
struct qxl_fbdev *qfbdev = info->par;
@@ -223,37 +312,89 @@ static void qxl_fb_copyarea(struct fb_info *info,
region->dx, region->dy);
}
+static void qxl_fb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *region)
+{
+ struct qxl_fbdev *qfbdev = info->par;
+ struct qxl_device *qdev = qfbdev->qdev;
+
+ if (!drm_can_sleep()) {
+ qxl_fb_delayed_copyarea(qfbdev, region);
+ schedule_work(&qdev->fb_work);
+ return;
+ }
+ /* make sure any previous work is done */
+ flush_work(&qdev->fb_work);
+ qxl_fb_copyarea_internal(info, region);
+}
+
static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
{
qxl_draw_opaque_fb(qxl_fb_image, 0);
}
+static void qxl_fb_imageblit_internal(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct qxl_fbdev *qfbdev = info->par;
+ struct qxl_fb_image qxl_fb_image;
+
+ /* ensure proper order of rendering operations - TODO: must do this
+ * for everything. */
+ qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
+ qxl_fb_imageblit_safe(&qxl_fb_image);
+}
+
static void qxl_fb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct qxl_fbdev *qfbdev = info->par;
struct qxl_device *qdev = qfbdev->qdev;
- struct qxl_fb_image qxl_fb_image;
if (!drm_can_sleep()) {
- /* we cannot do any ttm_bo allocation since that will fail on
- * ioremap_wc..__get_vm_area_node, so queue the work item
- * instead This can happen from printk inside an interrupt
- * context, i.e.: smp_apic_timer_interrupt..check_cpu_stall */
- qxl_io_log(qdev,
- "%s: TODO use RCU, mysterious locks with spin_lock\n",
- __func__);
+ qxl_fb_delayed_imageblit(qfbdev, image);
+ schedule_work(&qdev->fb_work);
return;
}
+ /* make sure any previous work is done */
+ flush_work(&qdev->fb_work);
+ qxl_fb_imageblit_internal(info, image);
+}
- /* ensure proper order of rendering operations - TODO: must do this
- * for everything. */
- qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
- qxl_fb_imageblit_safe(&qxl_fb_image);
+static void qxl_fb_work(struct work_struct *work)
+{
+ struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work);
+ unsigned long flags;
+ struct qxl_fb_op *entry, *tmp;
+ struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev;
+
+ /* since the irq context just adds entries to the end of the
+ list, dropping the lock should be fine, as the entry isn't
+ modified by the operation code */
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_for_each_entry_safe(entry, tmp, &qfbdev->delayed_ops, head) {
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+ switch (entry->op_type) {
+ case QXL_FB_OP_FILLRECT:
+ qxl_fb_fillrect_internal(qfbdev->helper.fbdev, &entry->op.fr);
+ break;
+ case QXL_FB_OP_COPYAREA:
+ qxl_fb_copyarea_internal(qfbdev->helper.fbdev, &entry->op.ca);
+ break;
+ case QXL_FB_OP_IMAGEBLIT:
+ qxl_fb_imageblit_internal(qfbdev->helper.fbdev, &entry->op.ib);
+ break;
+ }
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_del(&entry->head);
+ kfree(entry);
+ }
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
}
int qxl_fb_init(struct qxl_device *qdev)
{
+ INIT_WORK(&qdev->fb_work, qxl_fb_work);
return 0;
}
@@ -536,7 +677,8 @@ int qxl_fbdev_init(struct qxl_device *qdev)
qfbdev->qdev = qdev;
qdev->mode_info.qfbdev = qfbdev;
qfbdev->helper.funcs = &qxl_fb_helper_funcs;
-
+ spin_lock_init(&qfbdev->delayed_ops_lock);
+ INIT_LIST_HEAD(&qfbdev->delayed_ops);
ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
qxl_num_crtc /* num_crtc - QXL supports just 1 */,
QXLFB_CONN_LIMIT);
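
The delayed-op machinery added above is an instance of a common pattern: a context that cannot sleep appends a heap-allocated record to a lock-protected list and schedules a worker, which later drains the list in process context. A minimal userspace sketch of that producer/consumer shape - a pthread mutex standing in for the irq-safe spinlock, an integer op type standing in for the union - might look like this; it is an analogue, not the driver code:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct op {
		struct op *next;
		int type;		/* e.g. fillrect / copyarea / imageblit */
	};

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static struct op *head;
	static struct op **tail = &head;

	/* producer: may run where sleeping is not allowed, so it only
	 * allocates, links and returns (best effort, as in the driver) */
	static void queue_op(int type)
	{
		struct op *op = malloc(sizeof(*op));

		if (!op)
			return;
		op->type = type;
		op->next = NULL;
		pthread_mutex_lock(&lock);
		*tail = op;
		tail = &op->next;
		pthread_mutex_unlock(&lock);
	}

	/* consumer: drains one entry at a time and drops the lock while
	 * "rendering", mirroring the shape of qxl_fb_work() */
	static void drain_ops(void)
	{
		pthread_mutex_lock(&lock);
		while (head) {
			struct op *op = head;

			head = op->next;
			if (!head)
				tail = &head;
			pthread_mutex_unlock(&lock);
			printf("processing op %d\n", op->type);
			free(op);
			pthread_mutex_lock(&lock);
		}
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		queue_op(1);
		queue_op(2);
		drain_ops();
		return 0;
	}
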
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c
index 63c6715..ae59e91 100644
--- a/drivers/gpu/drm/qxl/qxl_fence.c
+++ b/drivers/gpu/drm/qxl/qxl_fence.c
@@ -49,17 +49,11 @@
For some reason, every so often the qxl hw fails to release and things go wrong.
*/
-
-
-int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id)
+/* must be called with the fence lock held */
+void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id)
{
- struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
-
- spin_lock(&bo->tbo.bdev->fence_lock);
radix_tree_insert(&qfence->tree, rel_id, qfence);
qfence->num_active_releases++;
- spin_unlock(&bo->tbo.bdev->fence_lock);
- return 0;
}
int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
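
The rename to qxl_fence_add_release_locked() follows the usual kernel convention that a _locked suffix shifts the locking burden to the caller - here qxl_release_fence_buffer_objects(), which takes bdev->fence_lock once and fences a whole list of buffers under it. A small userspace illustration of the convention, with a hypothetical counter standing in for the radix tree:

	#include <assert.h>
	#include <pthread.h>

	static pthread_mutex_t fence_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned num_active_releases;

	/* must be called with fence_lock held, like the _locked variant */
	static void fence_add_release_locked(unsigned rel_id)
	{
		(void)rel_id;	/* the driver keys a radix tree here */
		num_active_releases++;
	}

	/* the caller batches several additions under one lock acquisition */
	static void fence_many(const unsigned *ids, int n)
	{
		pthread_mutex_lock(&fence_lock);
		for (int i = 0; i < n; i++)
			fence_add_release_locked(ids[i]);
		pthread_mutex_unlock(&fence_lock);
	}

	int main(void)
	{
		unsigned ids[] = {1, 2, 3};

		fence_many(ids, 3);
		assert(num_active_releases == 3);
		return 0;
	}
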
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index a235693..25e1777 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
/* At least align on page size */
if (alignment < PAGE_SIZE)
alignment = PAGE_SIZE;
- r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo);
+ r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR(
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
index cf85620..7fbcc35 100644
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -30,31 +30,100 @@
#include "qxl_object.h"
static int
-qxl_image_create_helper(struct qxl_device *qdev,
+qxl_allocate_chunk(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_drm_image *image,
+ unsigned int chunk_size)
+{
+ struct qxl_drm_chunk *chunk;
+ int ret;
+
+ chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
+ if (!chunk)
+ return -ENOMEM;
+
+ ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
+ if (ret) {
+ kfree(chunk);
+ return ret;
+ }
+
+ list_add_tail(&chunk->head, &image->chunk_list);
+ return 0;
+}
+
+int
+qxl_image_alloc_objects(struct qxl_device *qdev,
struct qxl_release *release,
- struct qxl_bo **image_bo,
- const uint8_t *data,
- int width, int height,
- int depth, unsigned int hash,
- int stride)
+ struct qxl_drm_image **image_ptr,
+ int height, int stride)
+{
+ struct qxl_drm_image *image;
+ int ret;
+
+ image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL);
+ if (!image)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&image->chunk_list);
+
+ ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);
+ if (ret) {
+ kfree(image);
+ return ret;
+ }
+
+ ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);
+ if (ret) {
+ qxl_bo_unref(&image->bo);
+ kfree(image);
+ return ret;
+ }
+ *image_ptr = image;
+ return 0;
+}
+
+void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
{
+ struct qxl_drm_chunk *chunk, *tmp;
+
+ list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
+ qxl_bo_unref(&chunk->bo);
+ kfree(chunk);
+ }
+
+ qxl_bo_unref(&dimage->bo);
+ kfree(dimage);
+}
+
+static int
+qxl_image_init_helper(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_drm_image *dimage,
+ const uint8_t *data,
+ int width, int height,
+ int depth, unsigned int hash,
+ int stride)
+{
+ struct qxl_drm_chunk *drv_chunk;
struct qxl_image *image;
struct qxl_data_chunk *chunk;
int i;
int chunk_stride;
int linesize = width * depth / 8;
- struct qxl_bo *chunk_bo;
- int ret;
+ struct qxl_bo *chunk_bo, *image_bo;
void *ptr;
/* Chunk */
/* FIXME: Check integer overflow */
/* TODO: variable number of chunks */
+
+ drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);
+
+ chunk_bo = drv_chunk->bo;
chunk_stride = stride; /* TODO: should use linesize, but it renders
wrong (check the bitmaps are sent correctly
first) */
- ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride,
- &chunk_bo);
-
+
ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
chunk = ptr;
chunk->data_size = height * chunk_stride;
@@ -102,7 +171,6 @@ qxl_image_create_helper(struct qxl_device *qdev,
while (remain > 0) {
page_base = out_offset & PAGE_MASK;
page_offset = offset_in_page(out_offset);
-
size = min((int)(PAGE_SIZE - page_offset), remain);
ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
@@ -116,14 +184,10 @@ qxl_image_create_helper(struct qxl_device *qdev,
}
}
}
-
-
qxl_bo_kunmap(chunk_bo);
- /* Image */
- ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo);
-
- ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0);
+ image_bo = dimage->bo;
+ ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
image = ptr;
image->descriptor.id = 0;
@@ -154,23 +218,20 @@ qxl_image_create_helper(struct qxl_device *qdev,
image->u.bitmap.stride = chunk_stride;
image->u.bitmap.palette = 0;
image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
- qxl_release_add_res(qdev, release, chunk_bo);
- qxl_bo_unreserve(chunk_bo);
- qxl_bo_unref(&chunk_bo);
- qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr);
+ qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
return 0;
}
-int qxl_image_create(struct qxl_device *qdev,
+int qxl_image_init(struct qxl_device *qdev,
struct qxl_release *release,
- struct qxl_bo **image_bo,
+ struct qxl_drm_image *dimage,
const uint8_t *data,
int x, int y, int width, int height,
int depth, int stride)
{
data += y * stride + x * (depth / 8);
- return qxl_image_create_helper(qdev, release, image_bo, data,
+ return qxl_image_init_helper(qdev, release, dimage, data,
width, height, depth, 0, stride);
}
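
Splitting image creation into qxl_image_alloc_objects() / qxl_image_init() / qxl_image_free_objects() moves every allocation, and hence every failure point, in front of the reservation step, so the init path only writes data. A userspace analogue of that alloc/init/free lifecycle, with plain heap buffers standing in for the image and chunk BOs:

	#include <stdlib.h>
	#include <string.h>

	struct dimage {
		void *image_hdr;	/* stands in for dimage->bo */
		void *chunk;		/* stands in for the data chunk bo */
		size_t chunk_size;
	};

	/* all allocations happen here and can fail cleanly */
	static int dimage_alloc(struct dimage **out, size_t chunk_size)
	{
		struct dimage *di = calloc(1, sizeof(*di));

		if (!di)
			return -1;
		di->image_hdr = malloc(64);
		di->chunk = malloc(chunk_size);
		di->chunk_size = chunk_size;
		if (!di->image_hdr || !di->chunk) {
			free(di->image_hdr);
			free(di->chunk);
			free(di);
			return -1;
		}
		*out = di;
		return 0;
	}

	/* init only fills data into already-allocated storage */
	static void dimage_init(struct dimage *di, const void *data, size_t len)
	{
		memcpy(di->chunk, data, len < di->chunk_size ? len : di->chunk_size);
	}

	static void dimage_free(struct dimage *di)
	{
		free(di->chunk);
		free(di->image_hdr);
		free(di);
	}

	int main(void)
	{
		struct dimage *di;

		if (dimage_alloc(&di, 4096) == 0) {
			dimage_init(di, "pixels", 6);
			dimage_free(di);
		}
		return 0;
	}
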
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 27f45e4..6de3356 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -68,55 +68,60 @@ static int qxl_map_ioctl(struct drm_device *dev, void *data,
&qxl_map->offset);
}
+struct qxl_reloc_info {
+ int type;
+ struct qxl_bo *dst_bo;
+ uint32_t dst_offset;
+ struct qxl_bo *src_bo;
+ int src_offset;
+};
+
/*
* dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
* are on vram).
* *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
*/
static void
-apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
- struct qxl_bo *src, uint64_t src_off)
+apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
void *reloc_page;
-
- reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
- *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
- src, src_off);
- qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+ reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
+ *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
+ info->src_bo,
+ info->src_offset);
+ qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}
static void
-apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
- struct qxl_bo *src)
+apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
uint32_t id = 0;
void *reloc_page;
- if (src && !src->is_primary)
- id = src->surface_id;
+ if (info->src_bo && !info->src_bo->is_primary)
+ id = info->src_bo->surface_id;
- reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
- *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id;
- qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+ reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
+ *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
+ qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}
/* return holding the reference to this object */
static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
struct drm_file *file_priv, uint64_t handle,
- struct qxl_reloc_list *reloc_list)
+ struct qxl_release *release)
{
struct drm_gem_object *gobj;
struct qxl_bo *qobj;
int ret;
gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
- if (!gobj) {
- DRM_ERROR("bad bo handle %lld\n", handle);
+ if (!gobj)
return NULL;
- }
+
qobj = gem_to_qxl_bo(gobj);
- ret = qxl_bo_list_add(reloc_list, qobj);
+ ret = qxl_release_list_add(release, qobj);
if (ret)
return NULL;
@@ -129,151 +134,177 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
* However, the command as passed from user space must *not* contain the initial
* QXLReleaseInfo struct (first XXX bytes)
*/
-static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static int qxl_process_single_command(struct qxl_device *qdev,
+ struct drm_qxl_command *cmd,
+ struct drm_file *file_priv)
{
- struct qxl_device *qdev = dev->dev_private;
- struct drm_qxl_execbuffer *execbuffer = data;
- struct drm_qxl_command user_cmd;
- int cmd_num;
- struct qxl_bo *reloc_src_bo;
- struct qxl_bo *reloc_dst_bo;
- struct drm_qxl_reloc reloc;
+ struct qxl_reloc_info *reloc_info;
+ int release_type;
+ struct qxl_release *release;
+ struct qxl_bo *cmd_bo;
void *fb_cmd;
- int i, ret;
- struct qxl_reloc_list reloc_list;
+ int i, j, ret, num_relocs;
int unwritten;
- uint32_t reloc_dst_offset;
- INIT_LIST_HEAD(&reloc_list.bos);
- for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
- struct qxl_release *release;
- struct qxl_bo *cmd_bo;
- int release_type;
- struct drm_qxl_command *commands =
- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
+ switch (cmd->type) {
+ case QXL_CMD_DRAW:
+ release_type = QXL_RELEASE_DRAWABLE;
+ break;
+ case QXL_CMD_SURFACE:
+ case QXL_CMD_CURSOR:
+ default:
+ DRM_DEBUG("Only draw commands in execbuffers\n");
+ return -EINVAL;
+ break;
+ }
- if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
- sizeof(user_cmd)))
- return -EFAULT;
- switch (user_cmd.type) {
- case QXL_CMD_DRAW:
- release_type = QXL_RELEASE_DRAWABLE;
- break;
- case QXL_CMD_SURFACE:
- case QXL_CMD_CURSOR:
- default:
- DRM_DEBUG("Only draw commands in execbuffers\n");
- return -EINVAL;
- break;
- }
+ if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
+ return -EINVAL;
- if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
- return -EINVAL;
+ if (!access_ok(VERIFY_READ,
+ (void *)(unsigned long)cmd->command,
+ cmd->command_size))
+ return -EFAULT;
- if (!access_ok(VERIFY_READ,
- (void *)(unsigned long)user_cmd.command,
- user_cmd.command_size))
- return -EFAULT;
+ reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
+ if (!reloc_info)
+ return -ENOMEM;
- ret = qxl_alloc_release_reserved(qdev,
- sizeof(union qxl_release_info) +
- user_cmd.command_size,
- release_type,
- &release,
- &cmd_bo);
- if (ret)
- return ret;
+ ret = qxl_alloc_release_reserved(qdev,
+ sizeof(union qxl_release_info) +
+ cmd->command_size,
+ release_type,
+ &release,
+ &cmd_bo);
+ if (ret)
+ goto out_free_reloc;
- /* TODO copy slow path code from i915 */
- fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size);
+ /* TODO copy slow path code from i915 */
+ fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
- {
- struct qxl_drawable *draw = fb_cmd;
+ {
+ struct qxl_drawable *draw = fb_cmd;
+ draw->mm_time = qdev->rom->mm_clock;
+ }
- draw->mm_time = qdev->rom->mm_clock;
- }
- qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
- if (unwritten) {
- DRM_ERROR("got unwritten %d\n", unwritten);
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- return -EFAULT;
+ qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
+ if (unwritten) {
+ DRM_ERROR("got unwritten %d\n", unwritten);
+ ret = -EFAULT;
+ goto out_free_release;
+ }
+
+ /* fill out reloc info structs */
+ num_relocs = 0;
+ for (i = 0; i < cmd->relocs_num; ++i) {
+ struct drm_qxl_reloc reloc;
+
+ if (DRM_COPY_FROM_USER(&reloc,
+ &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
+ sizeof(reloc))) {
+ ret = -EFAULT;
+ goto out_free_bos;
}
- for (i = 0 ; i < user_cmd.relocs_num; ++i) {
- if (DRM_COPY_FROM_USER(&reloc,
- &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i],
- sizeof(reloc))) {
- qxl_bo_list_unreserve(&reloc_list, true);
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- return -EFAULT;
- }
+ /* add the bos to the list of bos to validate -
+ need to validate first then process relocs? */
+ if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
+ DRM_DEBUG("unknown reloc type %d\n", reloc_info[i].type);
- /* add the bos to the list of bos to validate -
- need to validate first then process relocs? */
- if (reloc.dst_handle) {
- reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
- reloc.dst_handle, &reloc_list);
- if (!reloc_dst_bo) {
- qxl_bo_list_unreserve(&reloc_list, true);
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- return -EINVAL;
- }
- reloc_dst_offset = 0;
- } else {
- reloc_dst_bo = cmd_bo;
- reloc_dst_offset = release->release_offset;
+ ret = -EINVAL;
+ goto out_free_bos;
+ }
+ reloc_info[i].type = reloc.reloc_type;
+
+ if (reloc.dst_handle) {
+ reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
+ reloc.dst_handle, release);
+ if (!reloc_info[i].dst_bo) {
+ ret = -EINVAL;
+ reloc_info[i].src_bo = NULL;
+ goto out_free_bos;
}
-
- /* reserve and validate the reloc dst bo */
- if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
- reloc_src_bo =
- qxlhw_handle_to_bo(qdev, file_priv,
- reloc.src_handle, &reloc_list);
- if (!reloc_src_bo) {
- if (reloc_dst_bo != cmd_bo)
- drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
- qxl_bo_list_unreserve(&reloc_list, true);
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- return -EINVAL;
- }
- } else
- reloc_src_bo = NULL;
- if (reloc.reloc_type == QXL_RELOC_TYPE_BO) {
- apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
- reloc_src_bo, reloc.src_offset);
- } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
- apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
- } else {
- DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
- return -EINVAL;
+ reloc_info[i].dst_offset = reloc.dst_offset;
+ } else {
+ reloc_info[i].dst_bo = cmd_bo;
+ reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
+ }
+ num_relocs++;
+
+ /* reserve and validate the reloc dst bo */
+ if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
+ reloc_info[i].src_bo =
+ qxlhw_handle_to_bo(qdev, file_priv,
+ reloc.src_handle, release);
+ if (!reloc_info[i].src_bo) {
+ /* out_free_bos drops the dst_bo reference taken above */
+ ret = -EINVAL;
+ goto out_free_bos;
}
+ reloc_info[i].src_offset = reloc.src_offset;
+ } else {
+ reloc_info[i].src_bo = NULL;
+ reloc_info[i].src_offset = 0;
+ }
+ }
- if (reloc_src_bo && reloc_src_bo != cmd_bo) {
- qxl_release_add_res(qdev, release, reloc_src_bo);
- drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base);
- }
+ /* validate all buffers */
+ ret = qxl_release_reserve_list(release, false);
+ if (ret)
+ goto out_free_bos;
- if (reloc_dst_bo != cmd_bo)
- drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
- }
- qxl_fence_releaseable(qdev, release);
+ for (i = 0; i < cmd->relocs_num; ++i) {
+ if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
+ apply_reloc(qdev, &reloc_info[i]);
+ else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
+ apply_surf_reloc(qdev, &reloc_info[i]);
+ }
- ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true);
- if (ret == -ERESTARTSYS) {
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- qxl_bo_list_unreserve(&reloc_list, true);
+ ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
+ if (ret)
+ qxl_release_backoff_reserve_list(release);
+ else
+ qxl_release_fence_buffer_objects(release);
+
+out_free_bos:
+ for (j = 0; j < num_relocs; j++) {
+ if (reloc_info[j].dst_bo != cmd_bo)
+ drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base);
+ if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo)
+ drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base);
+ }
+out_free_release:
+ if (ret)
+ qxl_release_free(qdev, release);
+out_free_reloc:
+ kfree(reloc_info);
+ return ret;
+}
+
+static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct qxl_device *qdev = dev->dev_private;
+ struct drm_qxl_execbuffer *execbuffer = data;
+ struct drm_qxl_command user_cmd;
+ int cmd_num;
+ int ret;
+
+ for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
+
+ struct drm_qxl_command *commands =
+ (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
+
+ if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
+ sizeof(user_cmd)))
+ return -EFAULT;
+
+ ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
+ if (ret)
return ret;
- }
- qxl_release_unreserve(qdev, release);
}
- qxl_bo_list_unreserve(&reloc_list, 0);
return 0;
}
@@ -305,7 +336,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
goto out;
if (!qobj->pin_count) {
- qxl_ttm_placement_from_domain(qobj, qobj->type);
+ qxl_ttm_placement_from_domain(qobj, qobj->type, false);
ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
true, false);
if (unlikely(ret))
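
The reworked ioctl first records every reloc target, then reserves and validates the whole buffer list in one transaction, applies the relocs, and finally either fences the list on success or backs everything off on failure. TTM implements the reservation step with ww-mutexes and an acquire ticket; a deliberately simplified userspace sketch of the same all-or-nothing shape, using trylock instead of wound/wait:

	#include <pthread.h>
	#include <stdio.h>

	/* take every lock or release everything, roughly the contract that
	 * ttm_eu_reserve_buffers()/ttm_eu_backoff_reservation() provide
	 * (the real code avoids livelock with ww-mutex tickets, not trylock) */
	static int reserve_list(pthread_mutex_t **locks, int n)
	{
		int i, j;

		for (i = 0; i < n; i++) {
			if (pthread_mutex_trylock(locks[i]) != 0) {
				for (j = 0; j < i; j++)
					pthread_mutex_unlock(locks[j]);
				return -1;	/* caller may retry */
			}
		}
		return 0;
	}

	static void backoff_list(pthread_mutex_t **locks, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			pthread_mutex_unlock(locks[i]);
	}

	int main(void)
	{
		pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
		pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;
		pthread_mutex_t *list[] = { &a, &b };

		if (reserve_list(list, 2) == 0) {
			printf("all buffers reserved\n");
			backoff_list(list, 2);
		}
		return 0;
	}

The wound/wait machinery is what lets the real driver sleep on contended buffers instead of failing, so the trylock here is only a stand-in for the structure, not the semantics.
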
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 1191fe7..aa161cd 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -51,20 +51,21 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
return false;
}
-void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
+void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
u32 c = 0;
+ u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
qbo->placement.fpfn = 0;
qbo->placement.lpfn = 0;
qbo->placement.placement = qbo->placements;
qbo->placement.busy_placement = qbo->placements;
if (domain == QXL_GEM_DOMAIN_VRAM)
- qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM;
+ qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
if (domain == QXL_GEM_DOMAIN_SURFACE)
- qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0;
+ qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
if (domain == QXL_GEM_DOMAIN_CPU)
- qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
if (!c)
qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
qbo->placement.num_placement = c;
@@ -73,7 +74,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
int qxl_bo_create(struct qxl_device *qdev,
- unsigned long size, bool kernel, u32 domain,
+ unsigned long size, bool kernel, bool pinned, u32 domain,
struct qxl_surface *surf,
struct qxl_bo **bo_ptr)
{
@@ -99,15 +100,15 @@ int qxl_bo_create(struct qxl_device *qdev,
}
bo->gem_base.driver_private = NULL;
bo->type = domain;
- bo->pin_count = 0;
+ bo->pin_count = pinned ? 1 : 0;
bo->surface_id = 0;
qxl_fence_init(qdev, &bo->fence);
INIT_LIST_HEAD(&bo->list);
- atomic_set(&bo->reserve_count, 0);
+
if (surf)
bo->surf = *surf;
- qxl_ttm_placement_from_domain(bo, domain);
+ qxl_ttm_placement_from_domain(bo, domain, pinned);
r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, 0, !kernel, NULL, size,
@@ -228,7 +229,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
- int r, i;
+ int r;
if (bo->pin_count) {
bo->pin_count++;
@@ -236,9 +237,7 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
*gpu_addr = qxl_bo_gpu_offset(bo);
return 0;
}
- qxl_ttm_placement_from_domain(bo, domain);
- for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ qxl_ttm_placement_from_domain(bo, domain, true);
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (likely(r == 0)) {
bo->pin_count = 1;
@@ -317,53 +316,6 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
return 0;
}
-void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed)
-{
- struct qxl_bo_list *entry, *sf;
-
- list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) {
- qxl_bo_unreserve(entry->bo);
- list_del(&entry->lhead);
- kfree(entry);
- }
-}
-
-int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
-{
- struct qxl_bo_list *entry;
- int ret;
-
- list_for_each_entry(entry, &reloc_list->bos, lhead) {
- if (entry->bo == bo)
- return 0;
- }
-
- entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- entry->bo = bo;
- list_add(&entry->lhead, &reloc_list->bos);
-
- ret = qxl_bo_reserve(bo, false);
- if (ret)
- return ret;
-
- if (!bo->pin_count) {
- qxl_ttm_placement_from_domain(bo, bo->type);
- ret = ttm_bo_validate(&bo->tbo, &bo->placement,
- true, false);
- if (ret)
- return ret;
- }
-
- /* allocate a surface for reserved + validated buffers */
- ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
- if (ret)
- return ret;
- return 0;
-}
-
int qxl_surf_evict(struct qxl_device *qdev)
{
return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
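
Folding the pin state into qxl_ttm_placement_from_domain() lets one helper compute the final placement flags instead of patching TTM_PL_FLAG_NO_EVICT in afterwards, which is why the post-hoc loop in qxl_bo_pin() could be deleted. A toy version of the flag composition, with illustrative bit values rather than the real TTM constants:

	#include <stdint.h>
	#include <stdio.h>

	/* illustrative flag values only, not the real TTM constants */
	#define PL_FLAG_CACHED		(1u << 0)
	#define PL_FLAG_VRAM		(1u << 1)
	#define PL_FLAG_NO_EVICT	(1u << 2)

	static uint32_t placement_flags(int vram, int pinned)
	{
		uint32_t f = PL_FLAG_CACHED;

		if (vram)
			f |= PL_FLAG_VRAM;
		if (pinned)
			f |= PL_FLAG_NO_EVICT;	/* folded in at placement time */
		return f;
	}

	int main(void)
	{
		printf("pinned vram: 0x%x\n", (unsigned)placement_flags(1, 1));
		return 0;
	}
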
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index ee7ad79..8cb6167 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -88,7 +88,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
extern int qxl_bo_create(struct qxl_device *qdev,
unsigned long size,
- bool kernel, u32 domain,
+ bool kernel, bool pinned, u32 domain,
struct qxl_surface *surf,
struct qxl_bo **bo_ptr);
extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
@@ -99,9 +99,7 @@ extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
extern void qxl_bo_unref(struct qxl_bo **bo);
extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
extern int qxl_bo_unpin(struct qxl_bo *bo);
-extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain);
+extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
-extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo);
-extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed);
#endif
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index b443d67..b61449e 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -38,7 +38,8 @@
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
-uint64_t
+
+static uint64_t
qxl_release_alloc(struct qxl_device *qdev, int type,
struct qxl_release **ret)
{
@@ -53,9 +54,9 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
return 0;
}
release->type = type;
- release->bo_count = 0;
release->release_offset = 0;
release->surface_release_id = 0;
+ INIT_LIST_HEAD(&release->bos);
idr_preload(GFP_KERNEL);
spin_lock(&qdev->release_idr_lock);
@@ -77,20 +78,20 @@ void
qxl_release_free(struct qxl_device *qdev,
struct qxl_release *release)
{
- int i;
-
- QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id,
- release->type, release->bo_count);
+ struct qxl_bo_list *entry, *tmp;
+ QXL_INFO(qdev, "release %d, type %d\n", release->id,
+ release->type);
if (release->surface_release_id)
qxl_surface_id_dealloc(qdev, release->surface_release_id);
- for (i = 0 ; i < release->bo_count; ++i) {
+ list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
+ struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
QXL_INFO(qdev, "release %llx\n",
- release->bos[i]->tbo.addr_space_offset
+ entry->tv.bo->addr_space_offset
- DRM_FILE_OFFSET);
- qxl_fence_remove_release(&release->bos[i]->fence, release->id);
- qxl_bo_unref(&release->bos[i]);
+ qxl_fence_remove_release(&bo->fence, release->id);
+ qxl_bo_unref(&bo);
}
spin_lock(&qdev->release_idr_lock);
idr_remove(&qdev->release_idr, release->id);
@@ -98,83 +99,117 @@ qxl_release_free(struct qxl_device *qdev,
kfree(release);
}
-void
-qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release,
- struct qxl_bo *bo)
-{
- int i;
- for (i = 0; i < release->bo_count; i++)
- if (release->bos[i] == bo)
- return;
-
- if (release->bo_count >= QXL_MAX_RES) {
- DRM_ERROR("exceeded max resource on a qxl_release item\n");
- return;
- }
- release->bos[release->bo_count++] = qxl_bo_ref(bo);
-}
-
static int qxl_release_bo_alloc(struct qxl_device *qdev,
struct qxl_bo **bo)
{
int ret;
- ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL,
+ /* pin release bo's - they are too messy to evict */
+ ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
+ QXL_GEM_DOMAIN_VRAM, NULL,
bo);
return ret;
}
-int qxl_release_reserve(struct qxl_device *qdev,
- struct qxl_release *release, bool no_wait)
+int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
+{
+ struct qxl_bo_list *entry;
+
+ list_for_each_entry(entry, &release->bos, tv.head) {
+ if (entry->tv.bo == &bo->tbo)
+ return 0;
+ }
+
+ entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ qxl_bo_ref(bo);
+ entry->tv.bo = &bo->tbo;
+ list_add_tail(&entry->tv.head, &release->bos);
+ return 0;
+}
+
+static int qxl_release_validate_bo(struct qxl_bo *bo)
{
int ret;
- if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) {
- ret = qxl_bo_reserve(release->bos[0], no_wait);
+
+ if (!bo->pin_count) {
+ qxl_ttm_placement_from_domain(bo, bo->type, false);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement,
+ true, false);
if (ret)
return ret;
}
+
+ /* allocate a surface for reserved + validated buffers */
+ ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
+{
+ int ret;
+ struct qxl_bo_list *entry;
+
+ /* if only one object is on the release, it's the release itself;
+ since these objects are pinned there is no need to reserve */
+ if (list_is_singular(&release->bos))
+ return 0;
+
+ ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(entry, &release->bos, tv.head) {
+ struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
+
+ ret = qxl_release_validate_bo(bo);
+ if (ret) {
+ ttm_eu_backoff_reservation(&release->ticket, &release->bos);
+ return ret;
+ }
+ }
return 0;
}
-void qxl_release_unreserve(struct qxl_device *qdev,
- struct qxl_release *release)
+void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
- if (atomic_dec_and_test(&release->bos[0]->reserve_count))
- qxl_bo_unreserve(release->bos[0]);
+ /* if only one object is on the release, it's the release itself;
+ since these objects are pinned there is no need to reserve */
+ if (list_is_singular(&release->bos))
+ return;
+
+ ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}
+
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
enum qxl_surface_cmd_type surface_cmd_type,
struct qxl_release *create_rel,
struct qxl_release **release)
{
- int ret;
-
if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
int idr_ret;
+ struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
struct qxl_bo *bo;
union qxl_release_info *info;
/* stash the release after the create command */
idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
- bo = qxl_bo_ref(create_rel->bos[0]);
+ bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));
(*release)->release_offset = create_rel->release_offset + 64;
- qxl_release_add_res(qdev, *release, bo);
+ qxl_release_list_add(*release, bo);
- ret = qxl_release_reserve(qdev, *release, false);
- if (ret) {
- DRM_ERROR("release reserve failed\n");
- goto out_unref;
- }
info = qxl_release_map(qdev, *release);
info->id = idr_ret;
qxl_release_unmap(qdev, *release, info);
-
-out_unref:
qxl_bo_unref(&bo);
- return ret;
+ return 0;
}
return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
@@ -187,7 +222,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
{
struct qxl_bo *bo;
int idr_ret;
- int ret;
+ int ret = 0;
union qxl_release_info *info;
int cur_idx;
@@ -216,11 +251,6 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
mutex_unlock(&qdev->release_mutex);
return ret;
}
-
- /* pin releases bo's they are too messy to evict */
- ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false);
- qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL);
- qxl_bo_unreserve(qdev->current_release_bo[cur_idx]);
}
bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
@@ -231,36 +261,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
if (rbo)
*rbo = bo;
- qxl_release_add_res(qdev, *release, bo);
-
- ret = qxl_release_reserve(qdev, *release, false);
mutex_unlock(&qdev->release_mutex);
- if (ret)
- goto out_unref;
+
+ qxl_release_list_add(*release, bo);
info = qxl_release_map(qdev, *release);
info->id = idr_ret;
qxl_release_unmap(qdev, *release, info);
-out_unref:
qxl_bo_unref(&bo);
return ret;
}
-int qxl_fence_releaseable(struct qxl_device *qdev,
- struct qxl_release *release)
-{
- int i, ret;
- for (i = 0; i < release->bo_count; i++) {
- if (!release->bos[i]->tbo.sync_obj)
- release->bos[i]->tbo.sync_obj = &release->bos[i]->fence;
- ret = qxl_fence_add_release(&release->bos[i]->fence, release->id);
- if (ret)
- return ret;
- }
- return 0;
-}
-
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
uint64_t id)
{
@@ -273,10 +285,7 @@ struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
DRM_ERROR("failed to find id in release_idr\n");
return NULL;
}
- if (release->bo_count < 1) {
- DRM_ERROR("read a released resource with 0 bos\n");
- return NULL;
- }
+
return release;
}
@@ -285,9 +294,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
{
void *ptr;
union qxl_release_info *info;
- struct qxl_bo *bo = release->bos[0];
+ struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
+ struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
+ if (!ptr)
+ return NULL;
info = ptr + (release->release_offset & ~PAGE_SIZE);
return info;
}
@@ -296,9 +308,51 @@ void qxl_release_unmap(struct qxl_device *qdev,
struct qxl_release *release,
union qxl_release_info *info)
{
- struct qxl_bo *bo = release->bos[0];
+ struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
+ struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
void *ptr;
ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}
+
+void qxl_release_fence_buffer_objects(struct qxl_release *release)
+{
+ struct ttm_validate_buffer *entry;
+ struct ttm_buffer_object *bo;
+ struct ttm_bo_global *glob;
+ struct ttm_bo_device *bdev;
+ struct ttm_bo_driver *driver;
+ struct qxl_bo *qbo;
+
+ /* if only one object is on the release, it's the release itself;
+ since these objects are pinned there is no need to reserve */
+ if (list_is_singular(&release->bos))
+ return;
+
+ bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
+ bdev = bo->bdev;
+ driver = bdev->driver;
+ glob = bo->glob;
+
+ spin_lock(&glob->lru_lock);
+ spin_lock(&bdev->fence_lock);
+
+ list_for_each_entry(entry, &release->bos, head) {
+ bo = entry->bo;
+ qbo = to_qxl_bo(bo);
+
+ if (!entry->bo->sync_obj)
+ entry->bo->sync_obj = &qbo->fence;
+
+ qxl_fence_add_release_locked(&qbo->fence, release->id);
+
+ ttm_bo_add_to_lru(bo);
+ ww_mutex_unlock(&bo->resv->lock);
+ entry->reserved = false;
+ }
+ spin_unlock(&bdev->fence_lock);
+ spin_unlock(&glob->lru_lock);
+ ww_acquire_fini(&release->ticket);
+}
+
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 489cb8c..1dfd84c 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -206,7 +206,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
return;
}
qbo = container_of(bo, struct qxl_bo, tbo);
- qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU);
+ qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false);
*placement = qbo->placement;
}
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index fb441a7..15da7ef 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1222,12 +1222,17 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
int r;
mutex_lock(&ctx->mutex);
+ /* reset data block */
+ ctx->data_block = 0;
/* reset reg block */
ctx->reg_block = 0;
/* reset fb window */
ctx->fb_base = 0;
/* reset io mode */
ctx->io_mode = ATOM_IO_MM;
+ /* reset divmul */
+ ctx->divmul[0] = 0;
+ ctx->divmul[1] = 0;
r = atom_execute_table_locked(ctx, index, params);
mutex_unlock(&ctx->mutex);
return r;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 064023b..32501f6 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -44,6 +44,41 @@ static char *pre_emph_names[] = {
};
/***** radeon AUX functions *****/
+
+/* Atom needs data in little endian format
+ * so swap as appropriate when copying data to
+ * or from atom. Note that atom operates on
+ * dw units.
+ */
+static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
+{
+#ifdef __BIG_ENDIAN
+ u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
+ u32 *dst32, *src32;
+ int i;
+
+ memcpy(src_tmp, src, num_bytes);
+ src32 = (u32 *)src_tmp;
+ dst32 = (u32 *)dst_tmp;
+ if (to_le) {
+ for (i = 0; i < ((num_bytes + 3) / 4); i++)
+ dst32[i] = cpu_to_le32(src32[i]);
+ memcpy(dst, dst_tmp, num_bytes);
+ } else {
+ u8 dws = num_bytes & ~3;
+ for (i = 0; i < ((num_bytes + 3) / 4); i++)
+ dst32[i] = le32_to_cpu(src32[i]);
+ memcpy(dst, dst_tmp, dws);
+ if (num_bytes % 4) {
+ for (i = 0; i < (num_bytes % 4); i++)
+ dst[dws+i] = dst_tmp[dws+i];
+ }
+ }
+#else
+ memcpy(dst, src, num_bytes);
+#endif
+}
+
union aux_channel_transaction {
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
@@ -65,10 +100,10 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
- memcpy(base, send, send_bytes);
+ radeon_copy_swap(base, send, send_bytes, true);
- args.v1.lpAuxRequest = 0 + 4;
- args.v1.lpDataOut = 16 + 4;
+ args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
+ args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
args.v1.ucDataOutLen = 0;
args.v1.ucChannelID = chan->rec.i2c_id;
args.v1.ucDelay = delay / 10;
@@ -102,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
recv_bytes = recv_size;
if (recv && recv_size)
- memcpy(recv, base + 16, recv_bytes);
+ radeon_copy_swap(recv, base + 16, recv_bytes, false);
return recv_bytes;
}
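
radeon_copy_swap() works in 32-bit units because atom interprets its scratch area as little-endian dwords; on big-endian hosts every dword is byte-swapped through bounce buffers, with the rounded-up loop covering any partial trailing dword as well. A simplified userspace version of the dword-wise conversion, assuming glibc's endian.h and leaving the tail bytes unswapped for brevity (the driver swaps them via its bounce buffers):

	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* dword-wise LE conversion with a byte-wise tail; a no-op on
	 * little-endian hosts, mirroring the shape of radeon_copy_swap() */
	static void copy_to_le(uint8_t *dst, const uint8_t *src, size_t n)
	{
		size_t i, dws = n & ~(size_t)3;

		for (i = 0; i < dws; i += 4) {
			uint32_t v;

			memcpy(&v, src + i, 4);	/* alignment-safe load */
			v = htole32(v);
			memcpy(dst + i, &v, 4);
		}
		for (; i < n; i++)	/* tail, if n isn't a multiple of 4 */
			dst[i] = src[i];
	}

	int main(void)
	{
		uint8_t in[6] = {1, 2, 3, 4, 5, 6}, out[6];

		copy_to_le(out, in, sizeof(in));
		printf("%02x %02x\n", (unsigned)out[0], (unsigned)out[5]);
		return 0;
	}
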
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 0bfd55e..9953e1f 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -2548,9 +2548,6 @@ int btc_dpm_init(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi;
struct evergreen_power_info *eg_pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- u16 data_offset, size;
- u8 frev, crev;
struct atom_clock_dividers dividers;
int ret;
@@ -2633,16 +2630,7 @@ int btc_dpm_init(struct radeon_device *rdev)
eg_pi->vddci_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = true;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = CYPRESS_HASI_DFLT;
@@ -2659,8 +2647,7 @@ int btc_dpm_init(struct radeon_device *rdev)
pi->dynamic_pcie_gen2 = true;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index ed1d910..8928bd1 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -22,7 +22,6 @@
* Authors: Alex Deucher
*/
#include <linux/firmware.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
@@ -742,7 +741,6 @@ static int ci_mc_load_microcode(struct radeon_device *rdev)
*/
static int cik_init_microcode(struct radeon_device *rdev)
{
- struct platform_device *pdev;
const char *chip_name;
size_t pfp_req_size, me_req_size, ce_req_size,
mec_req_size, rlc_req_size, mc_req_size,
@@ -752,13 +750,6 @@ static int cik_init_microcode(struct radeon_device *rdev)
DRM_DEBUG("\n");
- pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
- err = IS_ERR(pdev);
- if (err) {
- printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
- return -EINVAL;
- }
-
switch (rdev->family) {
case CHIP_BONAIRE:
chip_name = "BONAIRE";
@@ -794,7 +785,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
DRM_INFO("Loading %s Microcode\n", chip_name);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
- err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->pfp_fw->size != pfp_req_size) {
@@ -806,7 +797,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
}
snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
- err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->me_fw->size != me_req_size) {
@@ -817,7 +808,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
}
snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
- err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->ce_fw->size != ce_req_size) {
@@ -828,7 +819,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
}
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
- err = request_firmware(&rdev->mec_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->mec_fw->size != mec_req_size) {
@@ -839,7 +830,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
}
snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
- err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->rlc_fw->size != rlc_req_size) {
@@ -850,7 +841,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
}
snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
- err = request_firmware(&rdev->sdma_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->sdma_fw->size != sdma_req_size) {
@@ -863,7 +854,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
/* No MC ucode on APUs */
if (!(rdev->flags & RADEON_IS_IGP)) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
- err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->mc_fw->size != mc_req_size) {
@@ -875,8 +866,6 @@ static int cik_init_microcode(struct radeon_device *rdev)
}
out:
- platform_device_unregister(pdev);
-
if (err) {
if (err != -EINVAL)
printk(KERN_ERR
@@ -2598,9 +2587,11 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
if (rdev->wb.enabled) {
rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
} else {
+ mutex_lock(&rdev->srbm_mutex);
cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
rptr = RREG32(CP_HQD_PQ_RPTR);
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
}
rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
@@ -2615,9 +2606,11 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
if (rdev->wb.enabled) {
wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]);
} else {
+ mutex_lock(&rdev->srbm_mutex);
cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
wptr = RREG32(CP_HQD_PQ_WPTR);
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
}
wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
@@ -2908,6 +2901,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
WREG32(CP_CPF_DEBUG, tmp);
/* init the pipes */
+ mutex_lock(&rdev->srbm_mutex);
for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
int me = (i < 4) ? 1 : 2;
int pipe = (i < 4) ? i : (i - 4);
@@ -2930,6 +2924,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
WREG32(CP_HPD_EOP_CONTROL, tmp);
}
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
/* init the queues. Just two for now. */
for (i = 0; i < 2; i++) {
@@ -2983,6 +2978,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
mqd->static_thread_mgmt23[0] = 0xffffffff;
mqd->static_thread_mgmt23[1] = 0xffffffff;
+ mutex_lock(&rdev->srbm_mutex);
cik_srbm_select(rdev, rdev->ring[idx].me,
rdev->ring[idx].pipe,
rdev->ring[idx].queue, 0);
@@ -3110,6 +3106,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
radeon_bo_kunmap(rdev->ring[idx].mqd_obj);
radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
@@ -4331,6 +4328,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
+ mutex_lock(&rdev->srbm_mutex);
for (i = 0; i < 16; i++) {
cik_srbm_select(rdev, 0, 0, 0, i);
/* CP and shaders */
@@ -4346,6 +4344,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
/* XXX SDMA RLC - todo */
}
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
cik_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -4453,6 +4452,29 @@ void cik_vm_fini(struct radeon_device *rdev)
}
/**
+ * cik_vm_decode_fault - print human readable fault info
+ *
+ * @rdev: radeon_device pointer
+ * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
+ * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
+ *
+ * Print human readable fault information (CIK).
+ */
+static void cik_vm_decode_fault(struct radeon_device *rdev,
+ u32 status, u32 addr, u32 mc_client)
+{
+ u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+ u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
+ u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
+ char *block = (char *)&mc_client;
+
+ printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
+ protections, vmid, addr,
+ (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
+ block, mc_id);
+}
+
+/**
* cik_vm_flush - cik vm flush using the CP
*
* @rdev: radeon_device pointer
@@ -5507,6 +5529,7 @@ int cik_irq_process(struct radeon_device *rdev)
u32 ring_index;
bool queue_hotplug = false;
bool queue_reset = false;
+ u32 addr, status, mc_client;
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
@@ -5742,11 +5765,15 @@ restart_ih:
break;
case 146:
case 147:
+ addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
+ status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+ mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ addr);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ status);
+ cik_vm_decode_fault(rdev, status, addr, mc_client);
/* reset addr and status */
WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
@@ -5937,6 +5964,8 @@ static int cik_startup(struct radeon_device *rdev)
struct radeon_ring *ring;
int r;
+ cik_mc_program(rdev);
+
if (rdev->flags & RADEON_IS_IGP) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
!rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
@@ -5968,7 +5997,6 @@ static int cik_startup(struct radeon_device *rdev)
if (r)
return r;
- cik_mc_program(rdev);
r = cik_pcie_gart_enable(rdev);
if (r)
return r;
@@ -6177,7 +6205,7 @@ int cik_suspend(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
cik_cp_enable(rdev, false);
cik_sdma_enable(rdev, false);
- r600_uvd_rbc_stop(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_suspend(rdev);
cik_irq_suspend(rdev);
radeon_wb_disable(rdev);
@@ -6341,6 +6369,7 @@ void cik_fini(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_fini(rdev);
cik_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 63514b9..7e9275e 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -136,6 +136,22 @@
#define VM_INVALIDATE_RESPONSE 0x147c
#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
+#define PROTECTIONS_MASK (0xf << 0)
+#define PROTECTIONS_SHIFT 0
+ /* bit 0: range
+ * bit 1: pde0
+ * bit 2: valid
+ * bit 3: read
+ * bit 4: write
+ */
+#define MEMORY_CLIENT_ID_MASK (0xff << 12)
+#define MEMORY_CLIENT_ID_SHIFT 12
+#define MEMORY_CLIENT_RW_MASK (1 << 24)
+#define MEMORY_CLIENT_RW_SHIFT 24
+#define FAULT_VMID_MASK (0xf << 25)
+#define FAULT_VMID_SHIFT 25
+
+#define VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT 0x14E4
#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
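
cik_vm_decode_fault() turns the raw VM_CONTEXT1_PROTECTION_FAULT_STATUS word into readable text by masking and shifting the fields defined above. A standalone check of the same field extraction, using a made-up status value:

	#include <stdint.h>
	#include <stdio.h>

	/* same masks and shifts as the cikd.h definitions above */
	#define PROTECTIONS_MASK	(0xf << 0)
	#define PROTECTIONS_SHIFT	0
	#define MEMORY_CLIENT_ID_MASK	(0xff << 12)
	#define MEMORY_CLIENT_ID_SHIFT	12
	#define MEMORY_CLIENT_RW_MASK	(1 << 24)
	#define FAULT_VMID_MASK		(0xf << 25)
	#define FAULT_VMID_SHIFT	25

	int main(void)
	{
		uint32_t status = 0x0200c013;	/* made-up register value */

		printf("vmid %u, client %u, %s, protections 0x%x\n",
		       (unsigned)((status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT),
		       (unsigned)((status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT),
		       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
		       (unsigned)(status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT);
		return 0;
	}
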
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 9bcdd17..7e5d0b5 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -2038,9 +2038,6 @@ int cypress_dpm_init(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi;
struct evergreen_power_info *eg_pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- uint16_t data_offset, size;
- uint8_t frev, crev;
struct atom_clock_dividers dividers;
int ret;
@@ -2092,16 +2089,7 @@ int cypress_dpm_init(struct radeon_device *rdev)
eg_pi->vddci_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = true;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = CYPRESS_HASI_DFLT;
@@ -2122,8 +2110,7 @@ int cypress_dpm_init(struct radeon_device *rdev)
pi->dynamic_pcie_gen2 = true;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e49059d..d5b49e3 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -139,6 +139,8 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
void evergreen_program_aspm(struct radeon_device *rdev);
extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl);
+extern void cayman_vm_decode_fault(struct radeon_device *rdev,
+ u32 status, u32 addr);
static const u32 evergreen_golden_registers[] =
{
@@ -4586,6 +4588,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
bool queue_hotplug = false;
bool queue_hdmi = false;
bool queue_thermal = false;
+ u32 status, addr;
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
@@ -4872,11 +4875,14 @@ restart_ih:
break;
case 146:
case 147:
+ addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
+ status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ addr);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ status);
+ cayman_vm_decode_fault(rdev, status, addr);
/* reset addr and status */
WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
@@ -5100,6 +5106,8 @@ static int evergreen_startup(struct radeon_device *rdev)
/* enable aspm */
evergreen_program_aspm(rdev);
+ evergreen_mc_program(rdev);
+
if (ASIC_IS_DCE5(rdev)) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
r = ni_init_microcode(rdev);
@@ -5127,7 +5135,6 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
- evergreen_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
evergreen_agp_enable(rdev);
} else {
@@ -5285,10 +5292,10 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_suspend(rdev);
r700_cp_stop(rdev);
r600_dma_stop(rdev);
- r600_uvd_rbc_stop(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
@@ -5423,6 +5430,7 @@ void evergreen_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
@@ -5509,6 +5517,9 @@ void evergreen_program_aspm(struct radeon_device *rdev)
*/
bool fusion_platform = false;
+ if (radeon_aspm == 0)
+ return;
+
if (!(rdev->flags & RADEON_IS_PCIE))
return;
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index b9c6f76..b0e2800 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -148,18 +148,40 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
u32 base_rate = 24000;
+ u32 max_ratio = clock / base_rate;
+ u32 dto_phase;
+ u32 dto_modulo = clock;
+ u32 wallclock_ratio;
+ u32 dto_cntl;
if (!dig || !dig->afmt)
return;
+ if (max_ratio >= 8) {
+ dto_phase = 192 * 1000;
+ wallclock_ratio = 3;
+ } else if (max_ratio >= 4) {
+ dto_phase = 96 * 1000;
+ wallclock_ratio = 2;
+ } else if (max_ratio >= 2) {
+ dto_phase = 48 * 1000;
+ wallclock_ratio = 1;
+ } else {
+ dto_phase = 24 * 1000;
+ wallclock_ratio = 0;
+ }
+ dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+ dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+ WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
+
/* XXX two dtos; generally use dto0 for hdmi */
/* Express [24MHz / target pixel clock] as an exact rational
* number (a quotient of two integers): DCCG_AUDIO_DTOx_PHASE
* is the numerator, DCCG_AUDIO_DTOx_MODULE the denominator.
*/
- WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
- WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
+ WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo);
}
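
For reference, the new wallclock-ratio selection works out as follows for a common mode (a worked example, not taken from the patch):

/* Hypothetical 1080p HDMI mode, pixel clock 148.5 MHz (clock = 148500 kHz):
 *   max_ratio       = 148500 / 24000 = 6   ->  ">= 4" branch
 *   wallclock_ratio = 2
 *   dto_phase       = 96 * 1000 = 96000
 *   dto_modulo      = 148500
 * The PHASE/MODULE pair still encodes the 24 MHz-vs-pixel-clock ratio as a
 * rational number; note that dto_phase doubles with each branch (24k, 48k,
 * 96k, 192k), tracking the reference scaling selected by wallclock_ratio.
 */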
@@ -177,6 +199,9 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
uint32_t offset;
ssize_t err;
+ if (!dig || !dig->afmt)
+ return;
+
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
return;
@@ -280,6 +305,9 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ if (!dig || !dig->afmt)
+ return;
+
/* Silent, r600_hdmi_enable will raise WARN for us */
if (enable && dig->afmt->enabled)
return;
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index a7baf67..0d582ac 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -497,6 +497,9 @@
#define DCCG_AUDIO_DTO0_MODULE 0x05b4
#define DCCG_AUDIO_DTO0_LOAD 0x05b8
#define DCCG_AUDIO_DTO0_CNTL 0x05bc
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0
#define DCCG_AUDIO_DTO1_PHASE 0x05c0
#define DCCG_AUDIO_DTO1_MODULE 0x05c4
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index f30127c..ccb4f8b5 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -22,7 +22,6 @@
* Authors: Alex Deucher
*/
#include <linux/firmware.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
@@ -684,7 +683,6 @@ int ni_mc_load_microcode(struct radeon_device *rdev)
int ni_init_microcode(struct radeon_device *rdev)
{
- struct platform_device *pdev;
const char *chip_name;
const char *rlc_chip_name;
size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
@@ -694,13 +692,6 @@ int ni_init_microcode(struct radeon_device *rdev)
DRM_DEBUG("\n");
- pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
- err = IS_ERR(pdev);
- if (err) {
- printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
- return -EINVAL;
- }
-
switch (rdev->family) {
case CHIP_BARTS:
chip_name = "BARTS";
@@ -753,7 +744,7 @@ int ni_init_microcode(struct radeon_device *rdev)
DRM_INFO("Loading %s Microcode\n", chip_name);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
- err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->pfp_fw->size != pfp_req_size) {
@@ -765,7 +756,7 @@ int ni_init_microcode(struct radeon_device *rdev)
}
snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
- err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->me_fw->size != me_req_size) {
@@ -776,7 +767,7 @@ int ni_init_microcode(struct radeon_device *rdev)
}
snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
- err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->rlc_fw->size != rlc_req_size) {
@@ -789,7 +780,7 @@ int ni_init_microcode(struct radeon_device *rdev)
/* no MC ucode on TN */
if (!(rdev->flags & RADEON_IS_IGP)) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
- err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->mc_fw->size != mc_req_size) {
@@ -802,10 +793,14 @@ int ni_init_microcode(struct radeon_device *rdev)
if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
- err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev);
- if (err)
- goto out;
- if (rdev->smc_fw->size != smc_req_size) {
+ err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ } else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"ni_mc: Bogus length %zu in firmware \"%s\"\n",
rdev->mc_fw->size, fw_name);
@@ -814,8 +809,6 @@ int ni_init_microcode(struct radeon_device *rdev)
}
out:
- platform_device_unregister(pdev);
-
if (err) {
if (err != -EINVAL)
printk(KERN_ERR
@@ -2090,6 +2083,8 @@ static int cayman_startup(struct radeon_device *rdev)
/* enable aspm */
evergreen_program_aspm(rdev);
+ evergreen_mc_program(rdev);
+
if (rdev->flags & RADEON_IS_IGP) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = ni_init_microcode(rdev);
@@ -2118,7 +2113,6 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
- evergreen_mc_program(rdev);
r = cayman_pcie_gart_enable(rdev);
if (r)
return r;
@@ -2297,7 +2291,7 @@ int cayman_suspend(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
cayman_cp_enable(rdev, false);
cayman_dma_stop(rdev);
- r600_uvd_rbc_stop(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_suspend(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
@@ -2429,6 +2423,7 @@ void cayman_fini(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
@@ -2461,6 +2456,167 @@ void cayman_vm_fini(struct radeon_device *rdev)
{
}
+/**
+ * cayman_vm_decode_fault - print human-readable fault info
+ *
+ * @rdev: radeon_device pointer
+ * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
+ * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ *
+ * Print human-readable fault information (cayman/TN).
+ */
+void cayman_vm_decode_fault(struct radeon_device *rdev,
+ u32 status, u32 addr)
+{
+ u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+ u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
+ u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
+ char *block;
+
+ switch (mc_id) {
+ case 32:
+ case 16:
+ case 96:
+ case 80:
+ case 160:
+ case 144:
+ case 224:
+ case 208:
+ block = "CB";
+ break;
+ case 33:
+ case 17:
+ case 97:
+ case 81:
+ case 161:
+ case 145:
+ case 225:
+ case 209:
+ block = "CB_FMASK";
+ break;
+ case 34:
+ case 18:
+ case 98:
+ case 82:
+ case 162:
+ case 146:
+ case 226:
+ case 210:
+ block = "CB_CMASK";
+ break;
+ case 35:
+ case 19:
+ case 99:
+ case 83:
+ case 163:
+ case 147:
+ case 227:
+ case 211:
+ block = "CB_IMMED";
+ break;
+ case 36:
+ case 20:
+ case 100:
+ case 84:
+ case 164:
+ case 148:
+ case 228:
+ case 212:
+ block = "DB";
+ break;
+ case 37:
+ case 21:
+ case 101:
+ case 85:
+ case 165:
+ case 149:
+ case 229:
+ case 213:
+ block = "DB_HTILE";
+ break;
+ case 38:
+ case 22:
+ case 102:
+ case 86:
+ case 166:
+ case 150:
+ case 230:
+ case 214:
+ block = "SX";
+ break;
+ case 39:
+ case 23:
+ case 103:
+ case 87:
+ case 167:
+ case 151:
+ case 231:
+ case 215:
+ block = "DB_STEN";
+ break;
+ case 40:
+ case 24:
+ case 104:
+ case 88:
+ case 232:
+ case 216:
+ case 168:
+ case 152:
+ block = "TC_TFETCH";
+ break;
+ case 41:
+ case 25:
+ case 105:
+ case 89:
+ case 233:
+ case 217:
+ case 169:
+ case 153:
+ block = "TC_VFETCH";
+ break;
+ case 42:
+ case 26:
+ case 106:
+ case 90:
+ case 234:
+ case 218:
+ case 170:
+ case 154:
+ block = "VC";
+ break;
+ case 112:
+ block = "CP";
+ break;
+ case 113:
+ case 114:
+ block = "SH";
+ break;
+ case 115:
+ block = "VGT";
+ break;
+ case 178:
+ block = "IH";
+ break;
+ case 51:
+ block = "RLC";
+ break;
+ case 55:
+ block = "DMA";
+ break;
+ case 56:
+ block = "HDP";
+ break;
+ default:
+ block = "unknown";
+ break;
+ }
+
+ printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
+ protections, vmid, addr,
+ (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
+ block, mc_id);
+}
+
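A worked decode, using the bit layout from the nid.h hunk below (the register values here are hypothetical):

/* Example: status = 0x0707000c, addr = 0x00001234
 *   protections = 0x0707000c & 0xf          = 0xc
 *   mc_id       = (0x0707000c >> 12) & 0xff = 0x70 = 112  -> "CP"
 *   vmid        = (0x0707000c >> 25) & 0x7  = 3
 *   rw          = bit 24 set                -> "write"
 * printed as:
 *   VM fault (0x0c, vmid 3) at page 4660, write from CP (112)
 */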
#define R600_ENTRY_VALID (1 << 0)
#define R600_PTE_SYSTEM (1 << 1)
#define R600_PTE_SNOOPED (1 << 2)
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 559cf24..f0f5f74 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -1054,10 +1054,6 @@ static int ni_restrict_performance_levels_before_switch(struct radeon_device *rd
int ni_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
- struct ni_ps *ps = ni_get_ps(rps);
- u32 levels;
-
if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
@@ -1068,8 +1064,7 @@ int ni_dpm_force_performance_level(struct radeon_device *rdev,
if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
- levels = ps->performance_level_count - 1;
- if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+ if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
return -EINVAL;
} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
@@ -4072,9 +4067,6 @@ int ni_dpm_init(struct radeon_device *rdev)
struct rv7xx_power_info *pi;
struct evergreen_power_info *eg_pi;
struct ni_power_info *ni_pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- u16 data_offset, size;
- u8 frev, crev;
struct atom_clock_dividers dividers;
int ret;
@@ -4167,16 +4159,7 @@ int ni_dpm_init(struct radeon_device *rdev)
eg_pi->vddci_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = true;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = CYPRESS_HASI_DFLT;
@@ -4193,8 +4176,7 @@ int ni_dpm_init(struct radeon_device *rdev)
pi->dynamic_pcie_gen2 = true;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index fe24a93..22421bc 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -133,6 +133,22 @@
#define VM_CONTEXT1_CNTL2 0x1434
#define VM_INVALIDATE_REQUEST 0x1478
#define VM_INVALIDATE_RESPONSE 0x147c
+#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
+#define PROTECTIONS_MASK (0xf << 0)
+#define PROTECTIONS_SHIFT 0
+ /* bit 0: range
+ * bit 2: pde0
+ * bit 3: valid
+ * bit 4: read
+ * bit 5: write
+ */
+#define MEMORY_CLIENT_ID_MASK (0xff << 12)
+#define MEMORY_CLIENT_ID_SHIFT 12
+#define MEMORY_CLIENT_RW_MASK (1 << 24)
+#define MEMORY_CLIENT_RW_SHIFT 24
+#define FAULT_VMID_MASK (0x7 << 25)
+#define FAULT_VMID_SHIFT 25
#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c
#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c9affef..75349cd 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -39,7 +39,6 @@
#include "atom.h"
#include <linux/firmware.h>
-#include <linux/platform_device.h>
#include <linux/module.h>
#include "r100_reg_safe.h"
@@ -989,18 +988,11 @@ void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
/* Load the microcode for the CP */
static int r100_cp_init_microcode(struct radeon_device *rdev)
{
- struct platform_device *pdev;
const char *fw_name = NULL;
int err;
DRM_DEBUG_KMS("\n");
- pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
- err = IS_ERR(pdev);
- if (err) {
- printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
- return -EINVAL;
- }
if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
(rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
(rdev->family == CHIP_RS200)) {
@@ -1042,8 +1034,7 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
fw_name = FIRMWARE_R520;
}
- err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
- platform_device_unregister(pdev);
+ err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
if (err) {
printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
fw_name);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2d3655f..e66e720 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
-#include <linux/platform_device.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
@@ -2144,7 +2143,6 @@ void r600_cp_stop(struct radeon_device *rdev)
int r600_init_microcode(struct radeon_device *rdev)
{
- struct platform_device *pdev;
const char *chip_name;
const char *rlc_chip_name;
const char *smc_chip_name = "RV770";
@@ -2154,13 +2152,6 @@ int r600_init_microcode(struct radeon_device *rdev)
DRM_DEBUG("\n");
- pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
- err = IS_ERR(pdev);
- if (err) {
- printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
- return -EINVAL;
- }
-
switch (rdev->family) {
case CHIP_R600:
chip_name = "R600";
@@ -2272,7 +2263,7 @@ int r600_init_microcode(struct radeon_device *rdev)
DRM_INFO("Loading %s Microcode\n", chip_name);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
- err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->pfp_fw->size != pfp_req_size) {
@@ -2284,7 +2275,7 @@ int r600_init_microcode(struct radeon_device *rdev)
}
snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
- err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->me_fw->size != me_req_size) {
@@ -2295,7 +2286,7 @@ int r600_init_microcode(struct radeon_device *rdev)
}
snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
- err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->rlc_fw->size != rlc_req_size) {
@@ -2307,10 +2298,14 @@ int r600_init_microcode(struct radeon_device *rdev)
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
- err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev);
- if (err)
- goto out;
- if (rdev->smc_fw->size != smc_req_size) {
+ err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ } else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"smc: Bogus length %zu in firmware \"%s\"\n",
rdev->smc_fw->size, fw_name);
@@ -2319,8 +2314,6 @@ int r600_init_microcode(struct radeon_device *rdev)
}
out:
- platform_device_unregister(pdev);
-
if (err) {
if (err != -EINVAL)
printk(KERN_ERR
@@ -2708,12 +2701,29 @@ int r600_uvd_rbc_start(struct radeon_device *rdev)
return 0;
}
-void r600_uvd_rbc_stop(struct radeon_device *rdev)
+void r600_uvd_stop(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
/* force RBC into idle state */
WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+ mdelay(1);
+
+ /* put VCPU into reset */
+ WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+ mdelay(5);
+
+ /* disable VCPU clock */
+ WREG32(UVD_VCPU_CNTL, 0x0);
+
+ /* Unstall UMC and register bus */
+ WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+
ring->ready = false;
}
@@ -2733,6 +2743,11 @@ int r600_uvd_init(struct radeon_device *rdev)
/* disable interrupt */
WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+ mdelay(1);
+
/* put LMI, VCPU, RBC etc... into reset */
WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
@@ -2762,10 +2777,6 @@ int r600_uvd_init(struct radeon_device *rdev)
WREG32(UVD_MPC_SET_ALU, 0);
WREG32(UVD_MPC_SET_MUX, 0x88);
- /* Stall UMC */
- WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
- WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
-
/* take all subblocks out of reset, except VCPU */
WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
mdelay(5);
@@ -3019,7 +3030,7 @@ void r600_uvd_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
- uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr;
+ uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
radeon_ring_write(ring, fence->seq);
@@ -3145,6 +3156,90 @@ int r600_copy_blit(struct radeon_device *rdev,
}
/**
+ * r600_copy_cpdma - copy pages using the CP DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the CP DMA engine (r6xx+).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int r600_copy_cpdma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.blit_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_bytes, cur_size_in_bytes, tmp;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+ num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, WAIT_3D_IDLE_bit);
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_bytes = size_in_bytes;
+ if (cur_size_in_bytes > 0x1fffff)
+ cur_size_in_bytes = 0x1fffff;
+ size_in_bytes -= cur_size_in_bytes;
+ tmp = upper_32_bits(src_offset) & 0xff;
+ if (size_in_bytes == 0)
+ tmp |= PACKET3_CP_DMA_CP_SYNC;
+ radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
+ radeon_ring_write(ring, src_offset & 0xffffffff);
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, dst_offset & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, cur_size_in_bytes);
+ src_offset += cur_size_in_bytes;
+ dst_offset += cur_size_in_bytes;
+ }
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
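The 0x1fffff chunking above reflects the 21-bit byte-count field of the CP DMA packet; a worked example of the loop bookkeeping:

/* Moving 1024 GPU pages of 4 KiB each:
 *   size_in_bytes = 1024 << 12 = 4194304
 *   num_loops     = DIV_ROUND_UP(4194304, 0x1fffff) = 3
 * i.e. chunks of 2097151, 2097151 and 2 bytes. PACKET3_CP_DMA_CP_SYNC is
 * set only on the final chunk, once size_in_bytes has counted down to zero,
 * and the ring reservation is num_loops * 6 + 24 = 42 dwords.
 */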
+/**
* r600_copy_dma - copy pages using the DMA engine
*
* @rdev: radeon_device pointer
@@ -3239,6 +3334,8 @@ static int r600_startup(struct radeon_device *rdev)
/* enable pcie gen2 link */
r600_pcie_gen2_enable(rdev);
+ r600_mc_program(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
@@ -3251,7 +3348,6 @@ static int r600_startup(struct radeon_device *rdev)
if (r)
return r;
- r600_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
r600_agp_enable(rdev);
} else {
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index b88f54b..e5c860f 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -278,9 +278,9 @@ bool r600_dynamicpm_enabled(struct radeon_device *rdev)
void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
if (enable)
- WREG32_P(GENERAL_PWRMGT, 0, ~SCLK_PWRMGT_OFF);
+ WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
else
- WREG32_P(GENERAL_PWRMGT, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+ WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
}
void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index e73b2a7..f264df5 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -226,10 +226,29 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 base_rate = 24000;
+ u32 max_ratio = clock / base_rate;
+ u32 dto_phase;
+ u32 dto_modulo = clock;
+ u32 wallclock_ratio;
+ u32 dto_cntl;
if (!dig || !dig->afmt)
return;
+ if (max_ratio >= 8) {
+ dto_phase = 192 * 1000;
+ wallclock_ratio = 3;
+ } else if (max_ratio >= 4) {
+ dto_phase = 96 * 1000;
+ wallclock_ratio = 2;
+ } else if (max_ratio >= 2) {
+ dto_phase = 48 * 1000;
+ wallclock_ratio = 1;
+ } else {
+ dto_phase = 24 * 1000;
+ wallclock_ratio = 0;
+ }
+
/* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT.
* doesn't matter which one you use. Just use the first one.
*/
@@ -242,9 +261,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
/* according to the reg specs, this should DCE3.2 only, but in
* practice it seems to cover DCE3.0 as well.
*/
- WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
- WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
- WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+ if (dig->dig_encoder == 0) {
+ dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+ dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+ WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
+ WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+ } else {
+ dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+ dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+ WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl);
+ WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase);
+ WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+ }
} else {
/* according to the reg specs, this should be DCE2.0 and DCE3.0 */
WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
@@ -266,6 +297,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
uint32_t offset;
ssize_t err;
+ if (!dig || !dig->afmt)
+ return;
+
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
return;
@@ -448,6 +482,9 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 hdmi = HDMI0_ERROR_ACK;
+ if (!dig || !dig->afmt)
+ return;
+
/* Silent, r600_hdmi_enable will raise WARN for us */
if (enable && dig->afmt->enabled)
return;
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index f1b3084..7c78083 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -602,6 +602,7 @@
#define L2_BUSY (1 << 0)
#define WAIT_UNTIL 0x8040
+#define WAIT_CP_DMA_IDLE_bit (1 << 8)
#define WAIT_2D_IDLE_bit (1 << 14)
#define WAIT_3D_IDLE_bit (1 << 15)
#define WAIT_2D_IDLECLEAN_bit (1 << 16)
@@ -932,6 +933,9 @@
#define DCCG_AUDIO_DTO0_LOAD 0x051c
# define DTO_LOAD (1 << 31)
#define DCCG_AUDIO_DTO0_CNTL 0x0520
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0
#define DCCG_AUDIO_DTO1_PHASE 0x0524
#define DCCG_AUDIO_DTO1_MODULE 0x0528
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 9b7025d..9f19259 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -97,6 +97,7 @@ extern int radeon_msi;
extern int radeon_lockup_timeout;
extern int radeon_fastfb;
extern int radeon_dpm;
+extern int radeon_aspm;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -455,6 +456,7 @@ struct radeon_sa_manager {
uint64_t gpu_addr;
void *cpu_ptr;
uint32_t domain;
+ uint32_t align;
};
struct radeon_sa_bo;
@@ -783,6 +785,11 @@ struct radeon_mec {
/* number of entries in page table */
#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE)
+/* PTBs (Page Table Blocks) need to be aligned to 32K */
+#define RADEON_VM_PTB_ALIGN_SIZE 32768
+#define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1)
+#define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK)
+
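The alignment helpers round allocations up to the 32 KiB PTB granularity; for example:

/* RADEON_VM_PTB_ALIGN(0x9000)
 *   = (0x9000 + 0x7fff) & ~0x7fff
 *   = 0x10fff & ~0x7fff
 *   = 0x10000
 * i.e. a 36 KiB page-table allocation is padded to 64 KiB (two 32 KiB PTBs).
 */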
struct radeon_vm {
struct list_head list;
struct list_head va;
@@ -1460,6 +1467,7 @@ struct radeon_uvd {
struct radeon_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
+ void *saved_bo;
atomic_t handles[RADEON_MAX_UVD_HANDLES];
struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
struct delayed_work idle_work;
@@ -2054,10 +2062,10 @@ struct radeon_device {
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
const struct firmware *mc_fw; /* NI MC firmware */
const struct firmware *ce_fw; /* SI CE firmware */
- const struct firmware *uvd_fw; /* UVD firmware */
const struct firmware *mec_fw; /* CIK MEC firmware */
const struct firmware *sdma_fw; /* CIK SDMA firmware */
const struct firmware *smc_fw; /* SMC firmware */
+ const struct firmware *uvd_fw; /* UVD firmware */
struct r600_blit r600_blit;
struct r600_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
@@ -2087,6 +2095,8 @@ struct radeon_device {
/* ACPI interface */
struct radeon_atif atif;
struct radeon_atcs atcs;
+ /* srbm instance registers */
+ struct mutex srbm_mutex;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -2153,7 +2163,7 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
WREG32(reg, tmp_); \
} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
-#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
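The added parentheses are a real fix, not cosmetics; without them the macro mis-expands for compound arguments:

/* WREG32_OR(reg, A | B) with the old definition expanded its keep-mask as
 *   ~A | B          (unary ~ binds tighter than |)
 * instead of ~(A | B), so the bits in B were treated as "preserve from the
 * current register value" rather than forced on by the write.
 */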
#define WREG32_PLL_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32_PLL(reg); \
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 0970774..f8f8b31 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1026,8 +1026,8 @@ static struct radeon_asic r600_asic = {
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r600_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
- .copy = &r600_copy_dma,
- .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &r600_copy_cpdma,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1119,8 +1119,8 @@ static struct radeon_asic rv6xx_asic = {
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r600_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
- .copy = &r600_copy_dma,
- .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &r600_copy_cpdma,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1161,6 +1161,7 @@ static struct radeon_asic rv6xx_asic = {
.get_mclk = &rv6xx_dpm_get_mclk,
.print_power_state = &rv6xx_dpm_print_power_state,
.debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level,
+ .force_performance_level = &rv6xx_dpm_force_performance_level,
},
.pflip = {
.pre_page_flip = &rs600_pre_page_flip,
@@ -1229,8 +1230,8 @@ static struct radeon_asic rs780_asic = {
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r600_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
- .copy = &r600_copy_dma,
- .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &r600_copy_cpdma,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1270,6 +1271,7 @@ static struct radeon_asic rs780_asic = {
.get_sclk = &rs780_dpm_get_sclk,
.get_mclk = &rs780_dpm_get_mclk,
.print_power_state = &rs780_dpm_print_power_state,
+ .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level,
},
.pflip = {
.pre_page_flip = &rs600_pre_page_flip,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 45d0693..3d61d5a 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -340,6 +340,9 @@ int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages, struct radeon_fence **fence);
+int r600_copy_cpdma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages, struct radeon_fence **fence);
int r600_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages, struct radeon_fence **fence);
@@ -418,6 +421,8 @@ void rv6xx_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps);
void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
+int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level);
/* rs780 dpm */
int rs780_dpm_init(struct radeon_device *rdev);
int rs780_dpm_enable(struct radeon_device *rdev);
@@ -430,11 +435,13 @@ u32 rs780_dpm_get_sclk(struct radeon_device *rdev, bool low);
u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low);
void rs780_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps);
+void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m);
/* uvd */
int r600_uvd_init(struct radeon_device *rdev);
int r600_uvd_rbc_start(struct radeon_device *rdev);
-void r600_uvd_rbc_stop(struct radeon_device *rdev);
+void r600_uvd_stop(struct radeon_device *rdev);
int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r600_uvd_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index fbdaff5..4ccd61f 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2782,7 +2782,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
dividers->enable_dithen = (args.v3.ucCntlFlag &
ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
- dividers->fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
+ dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
dividers->ref_div = args.v3.ucRefDiv;
dividers->vco_mode = (args.v3.ucCntlFlag &
@@ -3513,7 +3513,6 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev,
u8 frev, crev, i;
u16 data_offset, size;
union vram_info *vram_info;
- u8 *p;
memset(mem_info, 0, sizeof(struct atom_memory_info));
@@ -3529,13 +3528,12 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev,
if (module_index < vram_info->v1_3.ucNumOfVRAMModule) {
ATOM_VRAM_MODULE_V3 *vram_module =
(ATOM_VRAM_MODULE_V3 *)vram_info->v1_3.aVramInfo;
- p = (u8 *)vram_info->v1_3.aVramInfo;
for (i = 0; i < module_index; i++) {
- vram_module = (ATOM_VRAM_MODULE_V3 *)p;
if (le16_to_cpu(vram_module->usSize) == 0)
return -EINVAL;
- p += le16_to_cpu(vram_module->usSize);
+ vram_module = (ATOM_VRAM_MODULE_V3 *)
+ ((u8 *)vram_module + le16_to_cpu(vram_module->usSize));
}
mem_info->mem_vendor = vram_module->asMemory.ucMemoryVenderID & 0xf;
mem_info->mem_type = vram_module->asMemory.ucMemoryType & 0xf0;
@@ -3547,13 +3545,12 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev,
if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
ATOM_VRAM_MODULE_V4 *vram_module =
(ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
- p = (u8 *)vram_info->v1_4.aVramInfo;
for (i = 0; i < module_index; i++) {
- vram_module = (ATOM_VRAM_MODULE_V4 *)p;
if (le16_to_cpu(vram_module->usModuleSize) == 0)
return -EINVAL;
- p += le16_to_cpu(vram_module->usModuleSize);
+ vram_module = (ATOM_VRAM_MODULE_V4 *)
+ ((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
}
mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf;
mem_info->mem_type = vram_module->ucMemoryType & 0xf0;
@@ -3572,13 +3569,12 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev,
if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
ATOM_VRAM_MODULE_V7 *vram_module =
(ATOM_VRAM_MODULE_V7 *)vram_info->v2_1.aVramInfo;
- p = (u8 *)vram_info->v2_1.aVramInfo;
for (i = 0; i < module_index; i++) {
- vram_module = (ATOM_VRAM_MODULE_V7 *)p;
if (le16_to_cpu(vram_module->usModuleSize) == 0)
return -EINVAL;
- p += le16_to_cpu(vram_module->usModuleSize);
+ vram_module = (ATOM_VRAM_MODULE_V7 *)
+ ((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
}
mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf;
mem_info->mem_type = vram_module->ucMemoryType & 0xf0;
@@ -3628,21 +3624,19 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
ATOM_VRAM_MODULE_V4 *vram_module =
(ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
- ATOM_MEMORY_TIMING_FORMAT *format;
- p = (u8 *)vram_info->v1_4.aVramInfo;
for (i = 0; i < module_index; i++) {
- vram_module = (ATOM_VRAM_MODULE_V4 *)p;
if (le16_to_cpu(vram_module->usModuleSize) == 0)
return -EINVAL;
- p += le16_to_cpu(vram_module->usModuleSize);
+ vram_module = (ATOM_VRAM_MODULE_V4 *)
+ ((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
}
mclk_range_table->num_entries = (u8)
- ((vram_module->usModuleSize - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) /
+ ((le16_to_cpu(vram_module->usModuleSize) - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) /
mem_timing_size);
- p = (u8 *)vram_module->asMemTiming;
+ p = (u8 *)&vram_module->asMemTiming[0];
for (i = 0; i < mclk_range_table->num_entries; i++) {
- format = (ATOM_MEMORY_TIMING_FORMAT *)p;
+ ATOM_MEMORY_TIMING_FORMAT *format = (ATOM_MEMORY_TIMING_FORMAT *)p;
mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange);
p += mem_timing_size;
}
@@ -3705,17 +3699,21 @@ int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
(ATOM_MEMORY_SETTING_DATA_BLOCK *)
((u8 *)reg_block + (2 * sizeof(u16)) +
le16_to_cpu(reg_block->usRegIndexTblSize));
+ ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];
num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) /
sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1;
if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
- while (!(reg_block->asRegIndexBuf[i].ucPreRegDataLength & ACCESS_PLACEHOLDER) &&
- (i < num_entries)) {
+ while (i < num_entries) {
+ if (format->ucPreRegDataLength & ACCESS_PLACEHOLDER)
+ break;
reg_table->mc_reg_address[i].s1 =
- (u16)(le16_to_cpu(reg_block->asRegIndexBuf[i].usRegIndex));
+ (u16)(le16_to_cpu(format->usRegIndex));
reg_table->mc_reg_address[i].pre_reg_data =
- (u8)(reg_block->asRegIndexBuf[i].ucPreRegDataLength);
+ (u8)(format->ucPreRegDataLength);
i++;
+ format = (ATOM_INIT_REG_INDEX_FORMAT *)
+ ((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
}
reg_table->last = i;
while ((*(u32 *)reg_data != END_OF_REG_DATA_BLOCK) &&
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 78edadc..68ce360 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -147,7 +147,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
enum radeon_combios_table_offset table)
{
struct radeon_device *rdev = dev->dev_private;
- int rev;
+ int rev, size;
uint16_t offset = 0, check_offset;
if (!rdev->bios)
@@ -156,174 +156,106 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
switch (table) {
/* absolute offset tables */
case COMBIOS_ASIC_INIT_1_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0xc);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0xc;
break;
case COMBIOS_BIOS_SUPPORT_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x14);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x14;
break;
case COMBIOS_DAC_PROGRAMMING_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x2a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x2a;
break;
case COMBIOS_MAX_COLOR_DEPTH_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x2c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x2c;
break;
case COMBIOS_CRTC_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x2e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x2e;
break;
case COMBIOS_PLL_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x30);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x30;
break;
case COMBIOS_TV_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x32);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x32;
break;
case COMBIOS_DFP_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x34);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x34;
break;
case COMBIOS_HW_CONFIG_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x36);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x36;
break;
case COMBIOS_MULTIMEDIA_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x38);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x38;
break;
case COMBIOS_TV_STD_PATCH_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x3e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x3e;
break;
case COMBIOS_LCD_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x40);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x40;
break;
case COMBIOS_MOBILE_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x42);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x42;
break;
case COMBIOS_PLL_INIT_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x46);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x46;
break;
case COMBIOS_MEM_CONFIG_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x48);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x48;
break;
case COMBIOS_SAVE_MASK_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x4a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x4a;
break;
case COMBIOS_HARDCODED_EDID_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x4c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x4c;
break;
case COMBIOS_ASIC_INIT_2_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x4e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x4e;
break;
case COMBIOS_CONNECTOR_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x50);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x50;
break;
case COMBIOS_DYN_CLK_1_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x52);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x52;
break;
case COMBIOS_RESERVED_MEM_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x54);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x54;
break;
case COMBIOS_EXT_TMDS_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x58);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x58;
break;
case COMBIOS_MEM_CLK_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x5a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x5a;
break;
case COMBIOS_EXT_DAC_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x5c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x5c;
break;
case COMBIOS_MISC_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x5e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x5e;
break;
case COMBIOS_CRT_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x60);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x60;
break;
case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x62);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x62;
break;
case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x64);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x64;
break;
case COMBIOS_FAN_SPEED_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x66);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x66;
break;
case COMBIOS_OVERDRIVE_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x68);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x68;
break;
case COMBIOS_OEM_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x6a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x6a;
break;
case COMBIOS_DYN_CLK_2_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x6c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x6c;
break;
case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x6e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x6e;
break;
case COMBIOS_I2C_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x70);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x70;
break;
/* relative offset tables */
case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */
@@ -439,11 +371,16 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
}
break;
default:
+ check_offset = 0;
break;
}
- return offset;
+ size = RBIOS8(rdev->bios_header_start + 0x6);
+ /* check absolute offset tables */
+ if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
+ offset = RBIOS16(rdev->bios_header_start + check_offset);
+ return offset;
}
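The rework funnels every absolute-offset table through one bounds check against the BIOS header size byte; for example:

/* A COMBIOS header whose size byte (bios_header_start + 0x6) reads 0x5e
 * covers offsets 0x0..0x5d only. A lookup of COMBIOS_CRT_INFO_TABLE
 * (check_offset = 0x60) now fails the "check_offset < size" test and
 * returns 0, instead of reading a 16-bit pointer from past the end of
 * the header.
 */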
bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
@@ -965,16 +902,22 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
dac = RBIOS8(dac_info + 0x3) & 0xf;
p_dac->ps2_pdac_adj = (bg << 8) | (dac);
}
- /* if the values are all zeros, use the table */
- if (p_dac->ps2_pdac_adj)
+ /* if either value is zero, use the table */
+ if ((dac == 0) || (bg == 0))
+ found = 0;
+ else
found = 1;
}
/* quirks */
+ /* Radeon 7000 (RV100) */
+ if (((dev->pdev->device == 0x5159) &&
+ (dev->pdev->subsystem_vendor == 0x174B) &&
+ (dev->pdev->subsystem_device == 0x7c28)) ||
/* Radeon 9100 (R200) */
- if ((dev->pdev->device == 0x514D) &&
+ ((dev->pdev->device == 0x514D) &&
(dev->pdev->subsystem_vendor == 0x174B) &&
- (dev->pdev->subsystem_device == 0x7149)) {
+ (dev->pdev->subsystem_device == 0x7149))) {
/* vbios value is bad, use the default */
found = 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 82335e3..63398ae 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1163,6 +1163,7 @@ int radeon_device_init(struct radeon_device *rdev,
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->gpu_clock_mutex);
+ mutex_init(&rdev->srbm_mutex);
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
@@ -1519,6 +1520,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
+ radeon_pm_suspend(rdev);
radeon_suspend(rdev);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -1564,6 +1566,7 @@ retry:
}
}
+ radeon_pm_resume(rdev);
drm_helper_resume_force_mode(rdev->ddev);
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e5419b3..29876b1 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -167,6 +167,7 @@ int radeon_msi = -1;
int radeon_lockup_timeout = 10000;
int radeon_fastfb = 0;
int radeon_dpm = -1;
+int radeon_aspm = -1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -225,6 +226,9 @@ module_param_named(fastfb, radeon_fastfb, int, 0444);
MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(dpm, radeon_dpm, int, 0444);
+MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(aspm, radeon_aspm, int, 0444);
+
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
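
With the parameter wired up, ASPM can be overridden at load time through the standard module-parameter mechanics:

# radeon built as a module
modprobe radeon aspm=0
# radeon built in: on the kernel command line
radeon.aspm=0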
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 43ec4a4..b990b1a 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -207,7 +207,6 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev)
if (rdev->gart.robj == NULL) {
return;
}
- radeon_gart_table_vram_unpin(rdev);
radeon_bo_unref(&rdev->gart.robj);
}
@@ -467,6 +466,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
size *= 2;
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
RADEON_GPU_PAGE_ALIGN(size),
+ RADEON_VM_PTB_ALIGN_SIZE,
RADEON_GEM_DOMAIN_VRAM);
if (r) {
dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -620,10 +620,10 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
}
retry:
- pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
+ pd_size = radeon_vm_directory_size(rdev);
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
&vm->page_directory, pd_size,
- RADEON_GPU_PAGE_SIZE, false);
+ RADEON_VM_PTB_ALIGN_SIZE, false);
if (r == -ENOMEM) {
r = radeon_vm_evict(rdev, vm);
if (r)
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index bcdefd1..081886b 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -260,10 +260,6 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
{
int r = 0;
- INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
- INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
- INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
-
spin_lock_init(&rdev->irq.lock);
r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
if (r) {
@@ -285,6 +281,11 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
rdev->irq.installed = false;
return r;
}
+
+ INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+ INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
+ INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
+
DRM_INFO("radeon: irq initialized.\n");
return 0;
}
@@ -304,8 +305,8 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
rdev->irq.installed = false;
if (rdev->msi_enabled)
pci_disable_msi(rdev->pdev);
+ flush_work(&rdev->hotplug_work);
}
- flush_work(&rdev->hotplug_work);
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 0219d26..2020bf4 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -377,6 +377,7 @@ int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
domain = lobj->alt_domain;
goto retry;
}
+ ttm_eu_backoff_reservation(ticket, head);
return r;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 91519a5..49c82c4 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -174,7 +174,7 @@ static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- unsigned size, u32 domain);
+ unsigned size, u32 align, u32 domain);
extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index f374c46..c557850 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1176,7 +1176,14 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_VERDE:
case CHIP_OLAND:
case CHIP_HAINAN:
- if (radeon_dpm == 1)
+ /* DPM requires the RLC, RV770+ dGPU requires SMC */
+ if (!rdev->rlc_fw)
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ else if ((rdev->family >= CHIP_RV770) &&
+ (!(rdev->flags & RADEON_IS_IGP)) &&
+ (!rdev->smc_fw))
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ else if (radeon_dpm == 1)
rdev->pm.pm_method = PM_METHOD_DPM;
else
rdev->pm.pm_method = PM_METHOD_PROFILE;
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 5f1c51a..fb5ea62 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -224,6 +224,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
}
r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
RADEON_IB_POOL_SIZE*64*1024,
+ RADEON_GPU_PAGE_SIZE,
RADEON_GEM_DOMAIN_GTT);
if (r) {
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 0abe5a9..f0bac68 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -49,7 +49,7 @@ static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- unsigned size, u32 domain)
+ unsigned size, u32 align, u32 domain)
{
int i, r;
@@ -57,13 +57,14 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
sa_manager->bo = NULL;
sa_manager->size = size;
sa_manager->domain = domain;
+ sa_manager->align = align;
sa_manager->hole = &sa_manager->olist;
INIT_LIST_HEAD(&sa_manager->olist);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
INIT_LIST_HEAD(&sa_manager->flist[i]);
}
- r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, size, align, true,
domain, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
@@ -317,7 +318,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
unsigned tries[RADEON_NUM_RINGS];
int i, r;
- BUG_ON(align > RADEON_GPU_PAGE_SIZE);
+ BUG_ON(align > sa_manager->align);
BUG_ON(size > sa_manager->size);
*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 41efcec..b79f4f5 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -56,20 +56,12 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work);
int radeon_uvd_init(struct radeon_device *rdev)
{
- struct platform_device *pdev;
unsigned long bo_size;
const char *fw_name;
int i, r;
INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
- pdev = platform_device_register_simple("radeon_uvd", 0, NULL, 0);
- r = IS_ERR(pdev);
- if (r) {
- dev_err(rdev->dev, "radeon_uvd: Failed to register firmware\n");
- return -EINVAL;
- }
-
switch (rdev->family) {
case CHIP_RV710:
case CHIP_RV730:
@@ -112,16 +104,13 @@ int radeon_uvd_init(struct radeon_device *rdev)
return -EINVAL;
}
- r = request_firmware(&rdev->uvd_fw, fw_name, &pdev->dev);
+ r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
if (r) {
dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
fw_name);
- platform_device_unregister(pdev);
return r;
}
- platform_device_unregister(pdev);
-
bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
@@ -131,16 +120,29 @@ int radeon_uvd_init(struct radeon_device *rdev)
return r;
}
- r = radeon_uvd_resume(rdev);
- if (r)
+ r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
+ if (r) {
+ radeon_bo_unref(&rdev->uvd.vcpu_bo);
+ dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
return r;
+ }
- memset(rdev->uvd.cpu_addr, 0, bo_size);
- memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+ r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->uvd.gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+ radeon_bo_unref(&rdev->uvd.vcpu_bo);
+ dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
+ return r;
+ }
- r = radeon_uvd_suspend(rdev);
- if (r)
+ r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
+ if (r) {
+ dev_err(rdev->dev, "(%d) UVD map failed\n", r);
return r;
+ }
+
+ radeon_bo_unreserve(rdev->uvd.vcpu_bo);
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
atomic_set(&rdev->uvd.handles[i], 0);
@@ -152,70 +154,73 @@ int radeon_uvd_init(struct radeon_device *rdev)
void radeon_uvd_fini(struct radeon_device *rdev)
{
- radeon_uvd_suspend(rdev);
- radeon_bo_unref(&rdev->uvd.vcpu_bo);
-}
-
-int radeon_uvd_suspend(struct radeon_device *rdev)
-{
int r;
if (rdev->uvd.vcpu_bo == NULL)
- return 0;
+ return;
r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
if (!r) {
radeon_bo_kunmap(rdev->uvd.vcpu_bo);
radeon_bo_unpin(rdev->uvd.vcpu_bo);
- rdev->uvd.cpu_addr = NULL;
- if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
- radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
- }
radeon_bo_unreserve(rdev->uvd.vcpu_bo);
-
- if (rdev->uvd.cpu_addr) {
- radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
- } else {
- rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
- }
}
- return r;
+
+ radeon_bo_unref(&rdev->uvd.vcpu_bo);
+
+ release_firmware(rdev->uvd_fw);
+}
+
+int radeon_uvd_suspend(struct radeon_device *rdev)
+{
+ unsigned size;
+ void *ptr;
+ int i;
+
+ if (rdev->uvd.vcpu_bo == NULL)
+ return 0;
+
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+ if (atomic_read(&rdev->uvd.handles[i]))
+ break;
+
+ if (i == RADEON_MAX_UVD_HANDLES)
+ return 0;
+
+ size = radeon_bo_size(rdev->uvd.vcpu_bo);
+ size -= rdev->uvd_fw->size;
+
+ ptr = rdev->uvd.cpu_addr;
+ ptr += rdev->uvd_fw->size;
+
+	rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
+	if (!rdev->uvd.saved_bo)
+		return -ENOMEM;
+	memcpy(rdev->uvd.saved_bo, ptr, size);
+
+ return 0;
}
int radeon_uvd_resume(struct radeon_device *rdev)
{
- int r;
+ unsigned size;
+ void *ptr;
if (rdev->uvd.vcpu_bo == NULL)
return -EINVAL;
- r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
- if (r) {
- radeon_bo_unref(&rdev->uvd.vcpu_bo);
- dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
- return r;
- }
+ memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
- /* Have been pin in cpu unmap unpin */
- radeon_bo_kunmap(rdev->uvd.vcpu_bo);
- radeon_bo_unpin(rdev->uvd.vcpu_bo);
+ size = radeon_bo_size(rdev->uvd.vcpu_bo);
+ size -= rdev->uvd_fw->size;
- r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
- &rdev->uvd.gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->uvd.vcpu_bo);
- radeon_bo_unref(&rdev->uvd.vcpu_bo);
- dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
- return r;
- }
+ ptr = rdev->uvd.cpu_addr;
+ ptr += rdev->uvd_fw->size;
- r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
- if (r) {
- dev_err(rdev->dev, "(%d) UVD map failed\n", r);
- return r;
- }
-
- radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+ if (rdev->uvd.saved_bo != NULL) {
+ memcpy(ptr, rdev->uvd.saved_bo, size);
+ kfree(rdev->uvd.saved_bo);
+ rdev->uvd.saved_bo = NULL;
+ } else
+ memset(ptr, 0, size);
return 0;
}
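For orientation: the rework assumes a fixed VCPU BO layout, with the firmware image at the start (simply re-copied from rdev->uvd_fw on resume) and only the trailing stack/heap/session area snapshotted across suspend. A self-contained sketch of the split the hunks above imply:

	/* assumed VCPU BO layout:
	 *   [0, fw_size)        firmware image - restored from rdev->uvd_fw
	 *   [fw_size, bo_size)  stack/heap and session state - saved to
	 *                       saved_bo on suspend, zeroed on resume if
	 *                       nothing was saved
	 */
	static unsigned long uvd_save_area(unsigned long bo_size,
					   unsigned long fw_size,
					   unsigned long *offset)
	{
		*offset = fw_size;		/* save area starts after the fw */
		return bo_size - fw_size;	/* and runs to the end of the BO */
	}
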
@@ -230,8 +235,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
int i, r;
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
- if (rdev->uvd.filp[i] == filp) {
- uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+ uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+ if (handle != 0 && rdev->uvd.filp[i] == filp) {
struct radeon_fence *fence;
r = radeon_uvd_get_destroy_msg(rdev,
@@ -351,9 +356,19 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL;
}
+ if (bo->tbo.sync_obj) {
+ r = radeon_fence_wait(bo->tbo.sync_obj, false);
+ if (r) {
+ DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+ return r;
+ }
+ }
+
r = radeon_bo_kmap(bo, &ptr);
- if (r)
+ if (r) {
+ DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
return r;
+ }
msg = ptr + offset;
@@ -379,8 +394,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
radeon_bo_kunmap(bo);
return 0;
} else {
- /* it's a create msg, no special handling needed */
radeon_bo_kunmap(bo);
+
+ if (msg_type != 0) {
+ DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+ return -EINVAL;
+ }
+
+ /* it's a create msg, no special handling needed */
}
/* create or decode, validate the handle */
@@ -403,7 +424,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
int data0, int data1,
- unsigned buf_sizes[])
+ unsigned buf_sizes[], bool *has_msg_cmd)
{
struct radeon_cs_chunk *relocs_chunk;
struct radeon_cs_reloc *reloc;
@@ -432,7 +453,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
if (cmd < 0x4) {
if ((end - start) < buf_sizes[cmd]) {
- DRM_ERROR("buffer to small (%d / %d)!\n",
+ DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
(unsigned)(end - start), buf_sizes[cmd]);
return -EINVAL;
}
@@ -457,9 +478,17 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
}
if (cmd == 0) {
+ if (*has_msg_cmd) {
+ DRM_ERROR("More than one message in a UVD-IB!\n");
+ return -EINVAL;
+ }
+ *has_msg_cmd = true;
r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
if (r)
return r;
+ } else if (!*has_msg_cmd) {
+ DRM_ERROR("Message needed before other commands are send!\n");
+ return -EINVAL;
}
return 0;
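Together with the end-of-parse check added further down, the has_msg_cmd plumbing enforces a simple stream shape: exactly one message command (cmd 0) per IB, ahead of every data-buffer command. A toy validator restating the rule (illustration only, not parser code):

	/* one message (cmd 0) per IB, and it must precede any data-buffer
	 * command (cmd 1..3); a missing message is also an error. */
	static int uvd_stream_ok(const unsigned *cmds, int n)
	{
		int i, seen_msg = 0;

		for (i = 0; i < n; i++) {
			if (cmds[i] == 0) {
				if (seen_msg)
					return 0;	/* second message */
				seen_msg = 1;
			} else if (!seen_msg) {
				return 0;		/* buffer before message */
			}
		}
		return seen_msg;			/* message is mandatory */
	}
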
@@ -468,7 +497,8 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
int *data0, int *data1,
- unsigned buf_sizes[])
+ unsigned buf_sizes[],
+ bool *has_msg_cmd)
{
int i, r;
@@ -482,7 +512,8 @@ static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
*data1 = p->idx;
break;
case UVD_GPCOM_VCPU_CMD:
- r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes);
+ r = radeon_uvd_cs_reloc(p, *data0, *data1,
+ buf_sizes, has_msg_cmd);
if (r)
return r;
break;
@@ -503,6 +534,9 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
struct radeon_cs_packet pkt;
int r, data0 = 0, data1 = 0;
+ /* does the IB have a msg command? */
+ bool has_msg_cmd = false;
+
/* minimum buffer sizes */
unsigned buf_sizes[] = {
[0x00000000] = 2048,
@@ -529,8 +563,8 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
return r;
switch (pkt.type) {
case RADEON_PACKET_TYPE0:
- r = radeon_uvd_cs_reg(p, &pkt, &data0,
- &data1, buf_sizes);
+ r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
+ buf_sizes, &has_msg_cmd);
if (r)
return r;
break;
@@ -542,6 +576,12 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
return -EINVAL;
}
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+
+ if (!has_msg_cmd) {
+ DRM_ERROR("UVD-IBs need a msg command!\n");
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index bef832a..d1a1ce7 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -28,6 +28,7 @@
#include "r600_dpm.h"
#include "rs780_dpm.h"
#include "atom.h"
+#include <linux/seq_file.h>
static struct igp_ps *rs780_get_ps(struct radeon_ps *rps)
{
@@ -961,3 +962,27 @@ u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low)
return pi->bootup_uma_clk;
}
+
+void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m)
+{
+ struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct igp_ps *ps = rs780_get_ps(rps);
+ u32 current_fb_div = RREG32(FVTHROT_STATUS_REG0) & CURRENT_FEEDBACK_DIV_MASK;
+ u32 func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
+ u32 ref_div = ((func_cntl & SPLL_REF_DIV_MASK) >> SPLL_REF_DIV_SHIFT) + 1;
+ u32 post_div = ((func_cntl & SPLL_SW_HILEN_MASK) >> SPLL_SW_HILEN_SHIFT) + 1 +
+ ((func_cntl & SPLL_SW_LOLEN_MASK) >> SPLL_SW_LOLEN_SHIFT) + 1;
+ u32 sclk = (rdev->clock.spll.reference_freq * current_fb_div) /
+ (post_div * ref_div);
+
+ seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+
+ /* guess based on the current sclk */
+ if (sclk < (ps->sclk_low + 500))
+ seq_printf(m, "power level 0 sclk: %u vddc_index: %d\n",
+ ps->sclk_low, ps->min_voltage);
+ else
+ seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n",
+ ps->sclk_high, ps->max_voltage);
+}
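The debugfs sclk guess is reconstructed purely from the SPLL divider fields. A worked example with illustrative register values (radeon clocks are kept in 10 kHz units, so 2700 means 27 MHz; none of these numbers come from real hardware):

	static unsigned rs780_example_sclk(void)
	{
		unsigned reference_freq = 2700;	/* 27 MHz */
		unsigned current_fb_div = 60;
		unsigned ref_div = 2, post_div = 2;

		/* 2700 * 60 / (2 * 2) = 40500 -> 405 MHz */
		return (reference_freq * current_fb_div) / (post_div * ref_div);
	}
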
diff --git a/drivers/gpu/drm/radeon/rs780d.h b/drivers/gpu/drm/radeon/rs780d.h
index b1142ed..cfbe9a4 100644
--- a/drivers/gpu/drm/radeon/rs780d.h
+++ b/drivers/gpu/drm/radeon/rs780d.h
@@ -28,6 +28,7 @@
# define SPLL_SLEEP (1 << 1)
# define SPLL_REF_DIV(x) ((x) << 2)
# define SPLL_REF_DIV_MASK (7 << 2)
+# define SPLL_REF_DIV_SHIFT 2
# define SPLL_FB_DIV(x) ((x) << 5)
# define SPLL_FB_DIV_MASK (0xff << 2)
# define SPLL_FB_DIV_SHIFT 2
@@ -36,8 +37,10 @@
# define SPLL_PULSENUM_MASK (3 << 14)
# define SPLL_SW_HILEN(x) ((x) << 16)
# define SPLL_SW_HILEN_MASK (0xf << 16)
+# define SPLL_SW_HILEN_SHIFT 16
# define SPLL_SW_LOLEN(x) ((x) << 20)
# define SPLL_SW_LOLEN_MASK (0xf << 20)
+# define SPLL_SW_LOLEN_SHIFT 20
# define SPLL_DIVEN (1 << 24)
# define SPLL_BYPASS_EN (1 << 25)
# define SPLL_CHG_STATUS (1 << 29)
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 8303de2..bdd888b 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -819,7 +819,7 @@ static void rv6xx_program_memory_timing_parameters(struct radeon_device *rdev)
POWERMODE1(calculate_memory_refresh_rate(rdev,
pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) |
POWERMODE2(calculate_memory_refresh_rate(rdev,
- pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) |
+ pi->hw.sclks[R600_POWER_LEVEL_HIGH])) |
POWERMODE3(calculate_memory_refresh_rate(rdev,
pi->hw.sclks[R600_POWER_LEVEL_HIGH])));
WREG32(ARB_RFSH_RATE, arb_refresh_rate);
@@ -1182,10 +1182,10 @@ static void rv6xx_program_display_gap(struct radeon_device *rdev)
u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
- if (RREG32(AVIVO_D1CRTC_CONTROL) & AVIVO_CRTC_EN) {
+ if (rdev->pm.dpm.new_active_crtcs & 1) {
tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
- } else if (RREG32(AVIVO_D2CRTC_CONTROL) & AVIVO_CRTC_EN) {
+ } else if (rdev->pm.dpm.new_active_crtcs & 2) {
tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
} else {
@@ -1670,6 +1670,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
int ret;
+ pi->restricted_levels = 0;
+
rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
rv6xx_clear_vc(rdev);
@@ -1756,6 +1758,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
+ rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
+
return 0;
}
@@ -1763,12 +1767,14 @@ void rv6xx_setup_asic(struct radeon_device *rdev)
{
r600_enable_acpi_pm(rdev);
- if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s)
- rv6xx_enable_l0s(rdev);
- if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1)
- rv6xx_enable_l1(rdev);
- if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1)
- rv6xx_enable_pll_sleep_in_l1(rdev);
+ if (radeon_aspm != 0) {
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s)
+ rv6xx_enable_l0s(rdev);
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1)
+ rv6xx_enable_l1(rdev);
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1)
+ rv6xx_enable_pll_sleep_in_l1(rdev);
+ }
}
void rv6xx_dpm_display_configuration_changed(struct radeon_device *rdev)
@@ -1938,9 +1944,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
int rv6xx_dpm_init(struct radeon_device *rdev)
{
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- uint16_t data_offset, size;
- uint8_t frev, crev;
+ struct radeon_atom_ss ss;
struct atom_clock_dividers dividers;
struct rv6xx_power_info *pi;
int ret;
@@ -1983,16 +1987,18 @@ int rv6xx_dpm_init(struct radeon_device *rdev)
pi->gfx_clock_gating = true;
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
+ pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_ENGINE_SS, 0);
+ pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_MEMORY_SS, 0);
+
+ /* Disable sclk ss, causes hangs on a lot of systems */
+ pi->sclk_ss = false;
+
+ if (pi->sclk_ss || pi->mclk_ss)
pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
+ else
pi->dynamic_ss = false;
- }
pi->dynamic_pcie_gen2 = true;
@@ -2083,3 +2089,34 @@ u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low)
else
return requested_state->high.mclk;
}
+
+int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level)
+{
+ struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+ if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+ pi->restricted_levels = 3;
+ } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+ pi->restricted_levels = 2;
+ } else {
+ pi->restricted_levels = 0;
+ }
+
+ rv6xx_clear_vc(rdev);
+ r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
+ r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);
+ r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
+ r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
+ r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);
+ rv6xx_enable_medium(rdev);
+ rv6xx_enable_high(rdev);
+ if (pi->restricted_levels == 3)
+ r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, false);
+ rv6xx_program_vc(rdev);
+ rv6xx_program_at(rdev);
+
+ rdev->pm.dpm.forced_level = level;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4a62ad2..f5e92cf 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -744,10 +744,10 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
(const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
radeon_program_register_sequence(rdev,
rv730_golden_registers,
- (const u32)ARRAY_SIZE(rv770_golden_registers));
+ (const u32)ARRAY_SIZE(rv730_golden_registers));
radeon_program_register_sequence(rdev,
rv730_mgcg_init,
- (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ (const u32)ARRAY_SIZE(rv730_mgcg_init));
break;
case CHIP_RV710:
radeon_program_register_sequence(rdev,
@@ -758,18 +758,18 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
(const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
radeon_program_register_sequence(rdev,
rv710_golden_registers,
- (const u32)ARRAY_SIZE(rv770_golden_registers));
+ (const u32)ARRAY_SIZE(rv710_golden_registers));
radeon_program_register_sequence(rdev,
rv710_mgcg_init,
- (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ (const u32)ARRAY_SIZE(rv710_mgcg_init));
break;
case CHIP_RV740:
radeon_program_register_sequence(rdev,
rv740_golden_registers,
- (const u32)ARRAY_SIZE(rv770_golden_registers));
+ (const u32)ARRAY_SIZE(rv740_golden_registers));
radeon_program_register_sequence(rdev,
rv740_mgcg_init,
- (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ (const u32)ARRAY_SIZE(rv740_mgcg_init));
break;
default:
break;
@@ -1829,6 +1829,8 @@ static int rv770_startup(struct radeon_device *rdev)
/* enable pcie gen2 link */
rv770_pcie_gen2_enable(rdev);
+ rv770_mc_program(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
@@ -1841,7 +1843,6 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
- rv770_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
rv770_agp_enable(rdev);
} else {
@@ -1983,6 +1984,7 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_suspend(rdev);
r700_cp_stop(rdev);
r600_dma_stop(rdev);
@@ -2098,6 +2100,7 @@ void rv770_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index d914e04..094c67a 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2099,12 +2099,14 @@ void rv770_dpm_setup_asic(struct radeon_device *rdev)
rv770_enable_acpi_pm(rdev);
- if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s)
- rv770_enable_l0s(rdev);
- if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1)
- rv770_enable_l1(rdev);
- if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1)
- rv770_enable_pll_sleep_in_l1(rdev);
+ if (radeon_aspm != 0) {
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s)
+ rv770_enable_l0s(rdev);
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1)
+ rv770_enable_l1(rdev);
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1)
+ rv770_enable_pll_sleep_in_l1(rdev);
+ }
}
void rv770_dpm_display_configuration_changed(struct radeon_device *rdev)
@@ -2317,12 +2319,25 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
return 0;
}
+void rv770_get_engine_memory_ss(struct radeon_device *rdev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+ struct radeon_atom_ss ss;
+
+ pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_ENGINE_SS, 0);
+ pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_MEMORY_SS, 0);
+
+ if (pi->sclk_ss || pi->mclk_ss)
+ pi->dynamic_ss = true;
+ else
+ pi->dynamic_ss = false;
+}
+
int rv770_dpm_init(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- uint16_t data_offset, size;
- uint8_t frev, crev;
struct atom_clock_dividers dividers;
int ret;
@@ -2367,16 +2382,7 @@ int rv770_dpm_init(struct radeon_device *rdev)
pi->mvdd_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = false;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = RV770_HASI_DFLT;
@@ -2391,8 +2397,7 @@ int rv770_dpm_init(struct radeon_device *rdev)
pi->dynamic_pcie_gen2 = true;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h
index 96b1b2a..9244eff 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.h
+++ b/drivers/gpu/drm/radeon/rv770_dpm.h
@@ -275,6 +275,7 @@ void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
struct radeon_ps *new_ps,
struct radeon_ps *old_ps);
+void rv770_get_engine_memory_ss(struct radeon_device *rdev);
/* smc */
int rv770_read_smc_soft_register(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 2349067..daa8d2d 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -22,7 +22,6 @@
* Authors: Alex Deucher
*/
#include <linux/firmware.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
@@ -1541,7 +1540,6 @@ static int si_mc_load_microcode(struct radeon_device *rdev)
static int si_init_microcode(struct radeon_device *rdev)
{
- struct platform_device *pdev;
const char *chip_name;
const char *rlc_chip_name;
size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
@@ -1551,13 +1549,6 @@ static int si_init_microcode(struct radeon_device *rdev)
DRM_DEBUG("\n");
- pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
- err = IS_ERR(pdev);
- if (err) {
- printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
- return -EINVAL;
- }
-
switch (rdev->family) {
case CHIP_TAHITI:
chip_name = "TAHITI";
@@ -1615,7 +1606,7 @@ static int si_init_microcode(struct radeon_device *rdev)
DRM_INFO("Loading %s Microcode\n", chip_name);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
- err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->pfp_fw->size != pfp_req_size) {
@@ -1627,7 +1618,7 @@ static int si_init_microcode(struct radeon_device *rdev)
}
snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
- err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->me_fw->size != me_req_size) {
@@ -1638,7 +1629,7 @@ static int si_init_microcode(struct radeon_device *rdev)
}
snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
- err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->ce_fw->size != ce_req_size) {
@@ -1649,7 +1640,7 @@ static int si_init_microcode(struct radeon_device *rdev)
}
snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
- err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->rlc_fw->size != rlc_req_size) {
@@ -1660,7 +1651,7 @@ static int si_init_microcode(struct radeon_device *rdev)
}
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
- err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->mc_fw->size != mc_req_size) {
@@ -1671,10 +1662,14 @@ static int si_init_microcode(struct radeon_device *rdev)
}
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
- err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev);
- if (err)
- goto out;
- if (rdev->smc_fw->size != smc_req_size) {
+ err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ } else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"si_smc: Bogus length %zu in firmware \"%s\"\n",
rdev->smc_fw->size, fw_name);
@@ -1682,8 +1677,6 @@ static int si_init_microcode(struct radeon_device *rdev)
}
out:
- platform_device_unregister(pdev);
-
if (err) {
if (err != -EINVAL)
printk(KERN_ERR
@@ -4401,6 +4394,270 @@ void si_vm_fini(struct radeon_device *rdev)
}
/**
+ * si_vm_decode_fault - print human readable fault info
+ *
+ * @rdev: radeon_device pointer
+ * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
+ * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ *
+ * Print human readable fault information (SI).
+ */
+static void si_vm_decode_fault(struct radeon_device *rdev,
+ u32 status, u32 addr)
+{
+ u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+ u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
+ u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
+ char *block;
+
+ if (rdev->family == CHIP_TAHITI) {
+ switch (mc_id) {
+ case 160:
+ case 144:
+ case 96:
+ case 80:
+ case 224:
+ case 208:
+ case 32:
+ case 16:
+ block = "CB";
+ break;
+ case 161:
+ case 145:
+ case 97:
+ case 81:
+ case 225:
+ case 209:
+ case 33:
+ case 17:
+ block = "CB_FMASK";
+ break;
+ case 162:
+ case 146:
+ case 98:
+ case 82:
+ case 226:
+ case 210:
+ case 34:
+ case 18:
+ block = "CB_CMASK";
+ break;
+ case 163:
+ case 147:
+ case 99:
+ case 83:
+ case 227:
+ case 211:
+ case 35:
+ case 19:
+ block = "CB_IMMED";
+ break;
+ case 164:
+ case 148:
+ case 100:
+ case 84:
+ case 228:
+ case 212:
+ case 36:
+ case 20:
+ block = "DB";
+ break;
+ case 165:
+ case 149:
+ case 101:
+ case 85:
+ case 229:
+ case 213:
+ case 37:
+ case 21:
+ block = "DB_HTILE";
+ break;
+ case 167:
+ case 151:
+ case 103:
+ case 87:
+ case 231:
+ case 215:
+ case 39:
+ case 23:
+ block = "DB_STEN";
+ break;
+ case 72:
+ case 68:
+ case 64:
+ case 8:
+ case 4:
+ case 0:
+ case 136:
+ case 132:
+ case 128:
+ case 200:
+ case 196:
+ case 192:
+ block = "TC";
+ break;
+ case 112:
+ case 48:
+ block = "CP";
+ break;
+ case 49:
+ case 177:
+ case 50:
+ case 178:
+ block = "SH";
+ break;
+ case 53:
+ case 190:
+ block = "VGT";
+ break;
+ case 117:
+ block = "IH";
+ break;
+ case 51:
+ case 115:
+ block = "RLC";
+ break;
+ case 119:
+ case 183:
+ block = "DMA0";
+ break;
+ case 61:
+ block = "DMA1";
+ break;
+ case 248:
+ case 120:
+ block = "HDP";
+ break;
+ default:
+ block = "unknown";
+ break;
+ }
+ } else {
+ switch (mc_id) {
+ case 32:
+ case 16:
+ case 96:
+ case 80:
+ case 160:
+ case 144:
+ case 224:
+ case 208:
+ block = "CB";
+ break;
+ case 33:
+ case 17:
+ case 97:
+ case 81:
+ case 161:
+ case 145:
+ case 225:
+ case 209:
+ block = "CB_FMASK";
+ break;
+ case 34:
+ case 18:
+ case 98:
+ case 82:
+ case 162:
+ case 146:
+ case 226:
+ case 210:
+ block = "CB_CMASK";
+ break;
+ case 35:
+ case 19:
+ case 99:
+ case 83:
+ case 163:
+ case 147:
+ case 227:
+ case 211:
+ block = "CB_IMMED";
+ break;
+ case 36:
+ case 20:
+ case 100:
+ case 84:
+ case 164:
+ case 148:
+ case 228:
+ case 212:
+ block = "DB";
+ break;
+ case 37:
+ case 21:
+ case 101:
+ case 85:
+ case 165:
+ case 149:
+ case 229:
+ case 213:
+ block = "DB_HTILE";
+ break;
+ case 39:
+ case 23:
+ case 103:
+ case 87:
+ case 167:
+ case 151:
+ case 231:
+ case 215:
+ block = "DB_STEN";
+ break;
+ case 72:
+ case 68:
+ case 8:
+ case 4:
+ case 136:
+ case 132:
+ case 200:
+ case 196:
+ block = "TC";
+ break;
+ case 112:
+ case 48:
+ block = "CP";
+ break;
+ case 49:
+ case 177:
+ case 50:
+ case 178:
+ block = "SH";
+ break;
+ case 53:
+ block = "VGT";
+ break;
+ case 117:
+ block = "IH";
+ break;
+ case 51:
+ case 115:
+ block = "RLC";
+ break;
+ case 119:
+ case 183:
+ block = "DMA0";
+ break;
+ case 61:
+ block = "DMA1";
+ break;
+ case 248:
+ case 120:
+ block = "HDP";
+ break;
+ default:
+ block = "unknown";
+ break;
+ }
+ }
+
+ printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
+ protections, vmid, addr,
+ (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
+ block, mc_id);
+}
+
+/**
* si_vm_set_page - update the page tables using the CP
*
* @rdev: radeon_device pointer
@@ -4962,14 +5219,12 @@ static void si_enable_mc_ls(struct radeon_device *rdev,
static void si_init_cg(struct radeon_device *rdev)
{
- bool has_uvd = true;
-
si_enable_mgcg(rdev, true);
- si_enable_cgcg(rdev, true);
+ si_enable_cgcg(rdev, false);
/* disable MC LS on Tahiti */
if (rdev->family == CHIP_TAHITI)
si_enable_mc_ls(rdev, false);
- if (has_uvd) {
+ if (rdev->has_uvd) {
si_enable_uvd_mgcg(rdev, true);
si_init_uvd_internal_cg(rdev);
}
@@ -4977,9 +5232,7 @@ static void si_init_cg(struct radeon_device *rdev)
static void si_fini_cg(struct radeon_device *rdev)
{
- bool has_uvd = true;
-
- if (has_uvd)
+ if (rdev->has_uvd)
si_enable_uvd_mgcg(rdev, false);
si_enable_cgcg(rdev, false);
si_enable_mgcg(rdev, false);
@@ -4988,11 +5241,11 @@ static void si_fini_cg(struct radeon_device *rdev)
static void si_init_pg(struct radeon_device *rdev)
{
bool has_pg = false;
-
+#if 0
/* only cape verde supports PG */
if (rdev->family == CHIP_VERDE)
has_pg = true;
-
+#endif
if (has_pg) {
si_init_ao_cu_mask(rdev);
si_init_dma_pg(rdev);
@@ -5766,6 +6019,7 @@ int si_irq_process(struct radeon_device *rdev)
u32 ring_index;
bool queue_hotplug = false;
bool queue_thermal = false;
+ u32 status, addr;
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
@@ -6001,11 +6255,14 @@ restart_ih:
break;
case 146:
case 147:
+ addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
+ status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ addr);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ status);
+ si_vm_decode_fault(rdev, status, addr);
/* reset addr and status */
WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
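To connect the handler with the decoder: a status word splits into the fields defined by the mask/shift values added to sid.h further down. A worked decode of a made-up value (0x030A0004; not a real register dump):

	#include <stdio.h>

	static void example_decode(void)
	{
		unsigned status = 0x030A0004;
		unsigned protections = status & 0xf;	/* PROTECTIONS_MASK  = 0x04 */
		unsigned mc_id = (status >> 12) & 0xff;	/* MEMORY_CLIENT_ID  = 160  */
		unsigned vmid = (status >> 25) & 0xf;	/* FAULT_VMID        = 1    */
		int write = !!(status & (1 << 24));	/* MEMORY_CLIENT_RW  = 1    */

		/* on Tahiti, client 160 maps to "CB" in the tables above */
		printf("VM fault (0x%02x, vmid %u), %s from client %u\n",
		       protections, vmid, write ? "write" : "read", mc_id);
	}
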
@@ -6165,6 +6422,8 @@ static int si_startup(struct radeon_device *rdev)
/* enable aspm */
si_program_aspm(rdev);
+ si_mc_program(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
!rdev->rlc_fw || !rdev->mc_fw) {
r = si_init_microcode(rdev);
@@ -6184,7 +6443,6 @@ static int si_startup(struct radeon_device *rdev)
if (r)
return r;
- si_mc_program(rdev);
r = si_pcie_gart_enable(rdev);
if (r)
return r;
@@ -6368,7 +6626,7 @@ int si_suspend(struct radeon_device *rdev)
si_cp_enable(rdev, false);
cayman_dma_stop(rdev);
if (rdev->has_uvd) {
- r600_uvd_rbc_stop(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_suspend(rdev);
}
si_irq_suspend(rdev);
@@ -6510,8 +6768,10 @@ void si_fini(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- if (rdev->has_uvd)
+ if (rdev->has_uvd) {
+ r600_uvd_stop(rdev);
radeon_uvd_fini(rdev);
+ }
si_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
@@ -6796,6 +7056,9 @@ static void si_program_aspm(struct radeon_device *rdev)
bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
bool disable_clkreq = false;
+ if (radeon_aspm == 0)
+ return;
+
if (!(rdev->flags & RADEON_IS_PCIE))
return;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 73aaa2e..88699e3 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -37,8 +37,6 @@
#define SMC_RAM_END 0x20000
-#define DDR3_DRAM_ROWS 0x2000
-
#define SCLK_MIN_DEEPSLEEP_FREQ 1350
static const struct si_cac_config_reg cac_weights_tahiti[] =
@@ -1767,8 +1765,9 @@ static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coe
{
s64 kt, kv, leakage_w, i_leakage, vddc;
s64 temperature, t_slope, t_intercept, av, bv, t_ref;
+ s64 tmp;
- i_leakage = drm_int2fixp(ileakage / 100);
+ i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
vddc = div64_s64(drm_int2fixp(v), 1000);
temperature = div64_s64(drm_int2fixp(t), 1000);
@@ -1778,8 +1777,9 @@ static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coe
bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
t_ref = drm_int2fixp(coeff->t_ref);
- kt = drm_fixp_div(drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, temperature)),
- drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, t_ref)));
+ tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
+ kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
+ kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
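Two fixes in one hunk: ileakage is now converted to fixed point before the divide (the old integer ileakage / 100 rounded sub-unit leakage currents down to zero), and the temperature factor's common sub-expression is hoisted into tmp. In plain form the factor is:

	kt = exp((t_slope * vddc + t_intercept) * T)
	   / exp((t_slope * vddc + t_intercept) * t_ref)
	   = exp((t_slope * vddc + t_intercept) * (T - t_ref))
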
@@ -1931,6 +1931,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
si_pi->cac_override = cac_override_pitcairn;
si_pi->powertune_data = &powertune_data_pitcairn;
si_pi->dte_data = dte_data_pitcairn;
+ break;
}
} else if (rdev->family == CHIP_VERDE) {
si_pi->lcac_config = lcac_cape_verde;
@@ -1941,6 +1942,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
case 0x683B:
case 0x683F:
case 0x6829:
+ case 0x6835:
si_pi->cac_weights = cac_weights_cape_verde_pro;
si_pi->dte_data = dte_data_cape_verde;
break;
@@ -2901,7 +2903,8 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
{
struct ni_ps *ps = ni_get_ps(rps);
struct radeon_clock_and_voltage_limits *max_limits;
- bool disable_mclk_switching;
+ bool disable_mclk_switching = false;
+ bool disable_sclk_switching = false;
u32 mclk, sclk;
u16 vddc, vddci;
int i;
@@ -2909,8 +2912,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
ni_dpm_vblank_too_short(rdev))
disable_mclk_switching = true;
- else
- disable_mclk_switching = false;
+
+ if (rps->vclk || rps->dclk) {
+ disable_mclk_switching = true;
+ disable_sclk_switching = true;
+ }
if (rdev->pm.dpm.ac_power)
max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
@@ -2938,27 +2944,43 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
if (disable_mclk_switching) {
mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
- sclk = ps->performance_levels[0].sclk;
- vddc = ps->performance_levels[0].vddc;
vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
} else {
- sclk = ps->performance_levels[0].sclk;
mclk = ps->performance_levels[0].mclk;
- vddc = ps->performance_levels[0].vddc;
vddci = ps->performance_levels[0].vddci;
}
+ if (disable_sclk_switching) {
+ sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
+ vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
+ } else {
+ sclk = ps->performance_levels[0].sclk;
+ vddc = ps->performance_levels[0].vddc;
+ }
+
/* adjusted low state */
ps->performance_levels[0].sclk = sclk;
ps->performance_levels[0].mclk = mclk;
ps->performance_levels[0].vddc = vddc;
ps->performance_levels[0].vddci = vddci;
- for (i = 1; i < ps->performance_level_count; i++) {
- if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
- ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
- if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
- ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+ if (disable_sclk_switching) {
+ sclk = ps->performance_levels[0].sclk;
+ for (i = 1; i < ps->performance_level_count; i++) {
+ if (sclk < ps->performance_levels[i].sclk)
+ sclk = ps->performance_levels[i].sclk;
+ }
+ for (i = 0; i < ps->performance_level_count; i++) {
+ ps->performance_levels[i].sclk = sclk;
+ ps->performance_levels[i].vddc = vddc;
+ }
+ } else {
+ for (i = 1; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
+ ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
+ if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
+ ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+ }
}
if (disable_mclk_switching) {
@@ -3237,10 +3259,10 @@ int si_dpm_force_performance_level(struct radeon_device *rdev,
{
struct radeon_ps *rps = rdev->pm.dpm.current_ps;
struct ni_ps *ps = ni_get_ps(rps);
- u32 levels;
+ u32 levels = ps->performance_level_count;
if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
- if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
+ if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
return -EINVAL;
if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
@@ -3249,14 +3271,13 @@ int si_dpm_force_performance_level(struct radeon_device *rdev,
if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
- levels = ps->performance_level_count - 1;
- if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+ if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
return -EINVAL;
} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
- if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
+ if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
return -EINVAL;
}
@@ -3620,8 +3641,12 @@ static void si_enable_display_gap(struct radeon_device *rdev)
{
u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
+ tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+ tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+ DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
+
tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
- tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) |
+ tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
WREG32(CG_DISPLAY_GAP_CNTL, tmp);
}
@@ -4036,16 +4061,15 @@ static int si_force_switch_to_arb_f0(struct radeon_device *rdev)
static u32 si_calculate_memory_refresh_rate(struct radeon_device *rdev,
u32 engine_clock)
{
- struct rv7xx_power_info *pi = rv770_get_pi(rdev);
u32 dram_rows;
u32 dram_refresh_rate;
u32 mc_arb_rfsh_rate;
u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
- if (pi->mem_gddr5)
- dram_rows = 1 << (tmp + 10);
+ if (tmp >= 4)
+ dram_rows = 16384;
else
- dram_rows = DDR3_DRAM_ROWS;
+ dram_rows = 1 << (tmp + 10);
dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
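For a feel of the numbers, a worked pass through the formula above with illustrative values (engine_clock is in 10 kHz units; all divisions are integer, exactly as in the driver):

	static unsigned si_example_refresh_rate(void)
	{
		unsigned engine_clock = 80000;			/* 800 MHz            */
		unsigned dram_rows = 1 << (3 + 10);		/* NOOFROWS = 3: 8192 */
		unsigned dram_refresh_rate = 1 << (1 + 3);	/* field = 1: 16      */

		/* (80000 * 10 * 16 / 8192 - 32) / 64 = (1562 - 32) / 64 = 23 */
		return ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
	}
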
@@ -6013,16 +6037,11 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
-#if 0
- /* XXX */
ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
if (ret) {
DRM_ERROR("si_dpm_force_performance_level failed\n");
return ret;
}
-#else
- rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
-#endif
return 0;
}
@@ -6254,9 +6273,6 @@ int si_dpm_init(struct radeon_device *rdev)
struct evergreen_power_info *eg_pi;
struct ni_power_info *ni_pi;
struct si_power_info *si_pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- u16 data_offset, size;
- u8 frev, crev;
struct atom_clock_dividers dividers;
int ret;
u32 mask;
@@ -6347,16 +6363,7 @@ int si_dpm_init(struct radeon_device *rdev)
si_pi->vddc_phase_shed_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = true;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = CYPRESS_HASI_DFLT;
@@ -6367,8 +6374,7 @@ int si_dpm_init(struct radeon_device *rdev)
eg_pi->sclk_deep_sleep = true;
si_pi->sclk_deep_sleep_above_low = false;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 12a20eb..2c8da27 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -367,6 +367,20 @@
#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
+#define PROTECTIONS_MASK (0xf << 0)
+#define PROTECTIONS_SHIFT 0
+ /* bit 0: range
+ * bit 1: pde0
+ * bit 2: valid
+ * bit 3: read
+ * bit 4: write
+ */
+#define MEMORY_CLIENT_ID_MASK (0xff << 12)
+#define MEMORY_CLIENT_ID_SHIFT 12
+#define MEMORY_CLIENT_RW_MASK (1 << 24)
+#define MEMORY_CLIENT_RW_SHIFT 24
+#define FAULT_VMID_MASK (0xf << 25)
+#define FAULT_VMID_SHIFT 25
#define VM_INVALIDATE_REQUEST 0x1478
#define VM_INVALIDATE_RESPONSE 0x147c
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 11b6b99..c0a8503 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1732,7 +1732,13 @@ int sumo_dpm_init(struct radeon_device *rdev)
pi->enable_sclk_ds = true;
pi->enable_dynamic_m3_arbiter = false;
pi->enable_dynamic_patch_ps = true;
- pi->enable_gfx_power_gating = true;
+ /* Some PALM chips don't seem to properly ungate gfx when UVD is in use;
+ * for now just disable gfx PG.
+ */
+ if (rdev->family == CHIP_PALM)
+ pi->enable_gfx_power_gating = false;
+ else
+ pi->enable_gfx_power_gating = true;
pi->enable_gfx_clock_gating = true;
pi->enable_mg_clock_gating = true;
pi->enable_auto_thermal_throttling = true;
@@ -1845,6 +1851,8 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev,
return 0;
if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+ if (pi->enable_boost)
+ sumo_enable_boost(rdev, rps, false);
sumo_power_level_enable(rdev, ps->num_levels - 1, true);
sumo_set_forced_level(rdev, ps->num_levels - 1);
sumo_set_forced_mode_enabled(rdev);
@@ -1855,6 +1863,8 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev,
sumo_set_forced_mode_enabled(rdev);
sumo_set_forced_mode(rdev, false);
} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+ if (pi->enable_boost)
+ sumo_enable_boost(rdev, rps, false);
sumo_power_level_enable(rdev, 0, true);
sumo_set_forced_level(rdev, 0);
sumo_set_forced_mode_enabled(rdev);
@@ -1868,6 +1878,8 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev,
for (i = 0; i < ps->num_levels; i++) {
sumo_power_level_enable(rdev, i, true);
}
+ if (pi->enable_boost)
+ sumo_enable_boost(rdev, rps, true);
}
rdev->pm.dpm.forced_level = level;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index ff82877..dc0fe09 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -249,8 +249,13 @@ static struct drm_driver rcar_du_driver = {
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_cma_dmabuf_import,
- .gem_prime_export = drm_gem_cma_dmabuf_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
.dumb_create = rcar_du_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
.dumb_destroy = drm_gem_cma_dumb_destroy,
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index edc1018..5f83f9a 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -276,8 +276,13 @@ static struct drm_driver shmob_drm_driver = {
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_cma_dmabuf_import,
- .gem_prime_export = drm_gem_cma_dmabuf_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
.dumb_destroy = drm_gem_cma_dumb_destroy,