author		Christian König <christian.koenig@amd.com>	2015-12-21 19:31:35 (GMT)
committer	Alex Deucher <alexander.deucher@amd.com>	2016-02-10 19:16:45 (GMT)
commit		36409d122cb84fa8f25a42b95a32c6090790e571 (patch)
tree		b1d19edfad8b25d539d79fd9582dab10729e6004 /drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
parent		f69f90a113f283b33b7b750204d708711a42b396 (diff)
download	linux-36409d122cb84fa8f25a42b95a32c6090790e571.tar.xz
drm/amdgpu: cleanup amdgpu_cs_list_validate
No need to actually check the current placement. Just use the allowed
domains when the threshold is reached.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
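As an aside for readers skimming the change: the selection rule this patch reduces the old placement check to can be sketched in a few lines of stand-alone C. struct entry_stub and pick_domain() below are hypothetical illustrations, not amdgpu API; the real logic operates on struct amdgpu_bo_list_entry inside amdgpu_cs_list_validate().

#include <stdint.h>

/* Hypothetical stand-in for struct amdgpu_bo_list_entry (illustration only). */
struct entry_stub {
	uint32_t prefered_domains;	/* spelling matches the kernel field */
	uint32_t allowed_domains;
};

/* Under the per-IB move budget, ask for the preferred domains (which may
 * move the buffer); once the budget is exhausted, accept anything in the
 * allowed domains instead of inspecting the current placement. */
static uint32_t pick_domain(const struct entry_stub *lobj,
			    uint64_t bytes_moved, uint64_t threshold)
{
	if (bytes_moved <= threshold)
		return lobj->prefered_domains;
	return lobj->allowed_domains;
}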
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c	66
1 file changed, 31 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 2f80da0..c63efd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -342,48 +342,44 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_list_entry *lobj;
-	struct amdgpu_bo *bo;
 	u64 initial_bytes_moved;
 	int r;
 
 	list_for_each_entry(lobj, validated, tv.head) {
-		bo = lobj->robj;
-		if (!bo->pin_count) {
-			u32 domain = lobj->prefered_domains;
-			u32 current_domain =
-				amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
-
-			/* Check if this buffer will be moved and don't move it
-			 * if we have moved too many buffers for this IB already.
-			 *
-			 * Note that this allows moving at least one buffer of
-			 * any size, because it doesn't take the current "bo"
-			 * into account. We don't want to disallow buffer moves
-			 * completely.
-			 */
-			if ((lobj->allowed_domains & current_domain) != 0 &&
-			    (domain & current_domain) == 0 && /* will be moved */
-			    p->bytes_moved > p->bytes_moved_threshold) {
-				/* don't move it */
-				domain = current_domain;
-			}
+		struct amdgpu_bo *bo = lobj->robj;
+		uint32_t domain;
 
-retry:
-			amdgpu_ttm_placement_from_domain(bo, domain);
-			initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
-			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-			p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
-					  initial_bytes_moved;
-
-			if (unlikely(r)) {
-				if (r != -ERESTARTSYS && domain != lobj->allowed_domains) {
-					domain = lobj->allowed_domains;
-					goto retry;
-				}
-				return r;
+		lobj->bo_va = amdgpu_vm_bo_find(vm, bo);
+		if (bo->pin_count)
+			continue;
+
+		/* Avoid moving this one if we have moved too many buffers
+		 * for this IB already.
+		 *
+		 * Note that this allows moving at least one buffer of
+		 * any size, because it doesn't take the current "bo"
+		 * into account. We don't want to disallow buffer moves
+		 * completely.
+		 */
+		if (p->bytes_moved <= p->bytes_moved_threshold)
+			domain = lobj->prefered_domains;
+		else
+			domain = lobj->allowed_domains;
+
+retry:
+		amdgpu_ttm_placement_from_domain(bo, domain);
+		initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+		p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+				  initial_bytes_moved;
+
+		if (unlikely(r)) {
+			if (r != -ERESTARTSYS && domain != lobj->allowed_domains) {
+				domain = lobj->allowed_domains;
+				goto retry;
 			}
+			return r;
 		}
-		lobj->bo_va = amdgpu_vm_bo_find(vm, bo);
 	}
 	return 0;
 }
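For context, the overall flow the function keeps after this cleanup — budget the bytes moved per submission and retry a failed validation once with the wider allowed domains — can be sketched in user space as follows. validate_fn, struct bo_stub and the stubbed ERESTARTSYS value are hypothetical placeholders for ttm_bo_validate(), the amdgpu buffer objects and the kernel-internal errno; this is an illustration, not kernel code.

#include <stddef.h>
#include <stdint.h>

#define ERESTARTSYS 512			/* kernel-internal value, stubbed for the sketch */

/* Hypothetical stand-in for the amdgpu/TTM buffer object (illustration only). */
struct bo_stub {
	uint32_t prefered_domains;
	uint32_t allowed_domains;
	int pinned;
};

/* Returns 0 on success or a negative errno; *moved reports how many bytes
 * the placement change cost (0 if the buffer did not move). */
typedef int (*validate_fn)(struct bo_stub *bo, uint32_t domain,
			   uint64_t *moved);

static int validate_list(struct bo_stub *bos, size_t count,
			 validate_fn validate, uint64_t threshold)
{
	uint64_t bytes_moved = 0;
	size_t i;

	for (i = 0; i < count; i++) {
		struct bo_stub *bo = &bos[i];
		uint64_t moved = 0;
		uint32_t domain;
		int r;

		if (bo->pinned)
			continue;

		/* The budget is checked before validating this buffer, so at
		 * least one buffer of any size can still be moved per IB. */
		domain = (bytes_moved <= threshold) ? bo->prefered_domains
						    : bo->allowed_domains;
retry:
		r = validate(bo, domain, &moved);
		bytes_moved += moved;

		if (r) {
			/* One fallback attempt with the wider domain set. */
			if (r != -ERESTARTSYS && domain != bo->allowed_domains) {
				domain = bo->allowed_domains;
				goto retry;
			}
			return r;
		}
	}
	return 0;
}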