From aac1e3caac343c05ab550fa611489bd206aa8a7b Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Wed, 24 Feb 2016 10:06:06 -0500
Subject: drm/amdgpu/gfx: fix off by one in rb rework (v2)

When I reworked this code, I messed up the num_rbs count.

v2: use hweight32

Reviewed-by: Ken Wang
Signed-off-by: Alex Deucher

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 250bcbc..bc5bdaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1635,7 +1635,7 @@ static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
 static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
 {
 	int i, j;
-	u32 data, tmp, num_rbs = 0;
+	u32 data;
 	u32 active_rbs = 0;
 
 	mutex_lock(&adev->grbm_idx_mutex);
@@ -1655,10 +1655,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	adev->gfx.config.backend_enable_mask = active_rbs;
-	tmp = active_rbs;
-	while (tmp >>= 1)
-		num_rbs++;
-	adev->gfx.config.num_rbs = num_rbs;
+	adev->gfx.config.num_rbs = hweight32(active_rbs);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 10c8650..71d536e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -2613,7 +2613,7 @@ static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
 static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
 {
 	int i, j;
-	u32 data, tmp, num_rbs = 0;
+	u32 data;
 	u32 active_rbs = 0;
 
 	mutex_lock(&adev->grbm_idx_mutex);
@@ -2629,10 +2629,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	adev->gfx.config.backend_enable_mask = active_rbs;
-	tmp = active_rbs;
-	while (tmp >>= 1)
-		num_rbs++;
-	adev->gfx.config.num_rbs = num_rbs;
+	adev->gfx.config.num_rbs = hweight32(active_rbs);
 }
 
 /**
--
cgit v0.10.2

From a6b91770ded280067418aa9945441a95601f5935 Mon Sep 17 00:00:00 2001
From: Geert Uytterhoeven
Date: Wed, 24 Feb 2016 09:13:45 +0100
Subject: drm/amd: Do not make DRM_AMD_ACP default to y

By default, not only is this driver enabled on all platforms, but so are
generic PM Domains and Multi-Function Devices. Drop the "default y" to
fix this.

Signed-off-by: Geert Uytterhoeven
Signed-off-by: Alex Deucher

diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index 2b07813..0f734ee 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -2,7 +2,6 @@ menu "ACP Configuration"
 
 config DRM_AMD_ACP
 	bool "Enable ACP IP support"
-	default y
 	select MFD_CORE
 	select PM_GENERIC_DOMAINS if PM
 	help
--
cgit v0.10.2

From 0994c09c0412591ccd7303eec263a9dc5119c653 Mon Sep 17 00:00:00 2001
From: Rex Zhu
Date: Thu, 25 Feb 2016 17:48:24 +0800
Subject: drm/amd/powerplay: fix code style warning.

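The -/+ pairs in the hunks below differ only in leading whitespace: the
early returns in pp_dpm_set_pp_table() and pp_dpm_force_clock_level()
were mis-indented and are realigned with kernel coding style.
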
Signed-off-by: Rex Zhu
Reviewed-by: Alex Deucher

diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 2ee4190..0db6423 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -606,7 +606,7 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
 
 	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
 	    hwmgr->hwmgr_func->set_pp_table == NULL)
-			return -EINVAL;
+		return -EINVAL;
 
 	return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size);
 }
@@ -623,7 +623,7 @@ static int pp_dpm_force_clock_level(void *handle,
 
 	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
 	    hwmgr->hwmgr_func->force_clock_level == NULL)
-			return -EINVAL;
+		return -EINVAL;
 
 	return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, level);
 }
--
cgit v0.10.2

From 70eacc72dbbc229013fee54550c03179f9a513d8 Mon Sep 17 00:00:00 2001
From: Christian König
Date: Mon, 22 Feb 2016 15:11:56 +0100
Subject: drm/amdgpu: fix error handling in amdgpu_bo_list_set

Don't leak BOs when an error occurs.

Signed-off-by: Christian König
Reviewed-by: Alex Deucher

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 90d6fc1..4792f9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -118,6 +118,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 		usermm = amdgpu_ttm_tt_get_usermm(entry->robj->tbo.ttm);
 		if (usermm) {
 			if (usermm != current->mm) {
+				amdgpu_bo_unref(&entry->robj);
 				r = -EPERM;
 				goto error_free;
 			}
@@ -151,6 +152,8 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 	return 0;
 
 error_free:
+	while (i--)
+		amdgpu_bo_unref(&array[i].robj);
 	drm_free_large(array);
 	return r;
 }
--
cgit v0.10.2

From a65ecc4070c75ed53553def79e5da862b47daf1f Mon Sep 17 00:00:00 2001
From: Dan Carpenter
Date: Thu, 25 Feb 2016 10:47:10 +0300
Subject: drm/amd: cleanup get_mfd_cell_dev()

It's simpler to just use snprintf() to print this to one buffer instead
of using strcpy() and strcat(). Also, snprintf() is slightly safer than
sprintf().

Signed-off-by: Dan Carpenter
Signed-off-by: Alex Deucher

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 9f8cfaa..d6b0bff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -240,12 +240,10 @@ static int acp_poweron(struct generic_pm_domain *genpd)
 static struct device *get_mfd_cell_dev(const char *device_name, int r)
 {
 	char auto_dev_name[25];
-	char buf[8];
 	struct device *dev;
 
-	sprintf(buf, ".%d.auto", r);
-	strcpy(auto_dev_name, device_name);
-	strcat(auto_dev_name, buf);
+	snprintf(auto_dev_name, sizeof(auto_dev_name),
+		 "%s.%d.auto", device_name, r);
 	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
 	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);
--
cgit v0.10.2

From 18aee16d7dfa47e88017a95aae9e73d1cf22c7a0 Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Thu, 25 Feb 2016 01:45:56 -0500
Subject: drm/radeon: update radeon acpi header

Add some new defs for ATPX.
Signed-off-by: Alex Deucher

diff --git a/drivers/gpu/drm/radeon/radeon_acpi.h b/drivers/gpu/drm/radeon/radeon_acpi.h
index be4af76..cd872f7 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.h
+++ b/drivers/gpu/drm/radeon/radeon_acpi.h
@@ -291,6 +291,8 @@ int radeon_atif_handler(struct radeon_device *rdev,
 #	define ATPX_FIXED_NOT_SUPPORTED			(1 << 9)
 #	define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED	(1 << 10)
 #	define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS		(1 << 11)
+#	define ATPX_DGPU_CAN_DRIVE_DISPLAYS		(1 << 12)
+#	define ATPX_MS_HYBRID_GFX_SUPPORTED		(1 << 14)
 #define ATPX_FUNCTION_POWER_CONTROL			0x2
 /* ARG0: ATPX_FUNCTION_POWER_CONTROL
  * ARG1:
--
cgit v0.10.2

From ce22c4bfdf70c03e9308751df5c2bf78b79a893f Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Thu, 25 Feb 2016 01:47:53 -0500
Subject: drm/amdgpu: update radeon acpi header

Add some new defs for ATPX.

Signed-off-by: Alex Deucher

diff --git a/drivers/gpu/drm/amd/include/amd_acpi.h b/drivers/gpu/drm/amd/include/amd_acpi.h
index 496360e..50e8933 100644
--- a/drivers/gpu/drm/amd/include/amd_acpi.h
+++ b/drivers/gpu/drm/amd/include/amd_acpi.h
@@ -340,6 +340,8 @@ struct atcs_pref_req_output {
 #	define ATPX_FIXED_NOT_SUPPORTED			(1 << 9)
 #	define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED	(1 << 10)
 #	define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS		(1 << 11)
+#	define ATPX_DGPU_CAN_DRIVE_DISPLAYS		(1 << 12)
+#	define ATPX_MS_HYBRID_GFX_SUPPORTED		(1 << 14)
 #define ATPX_FUNCTION_POWER_CONTROL			0x2
 /* ARG0: ATPX_FUNCTION_POWER_CONTROL
  * ARG1:
--
cgit v0.10.2

From 4ff37a83f19dab4e67299325ee22e98346eee857 Mon Sep 17 00:00:00 2001
From: Christian König
Date: Fri, 26 Feb 2016 16:18:26 +0100
Subject: drm/amdgpu: fix VM faults caused by vm_grab_id() v4

The owner must be per ring as long as we don't support sharing VMIDs
per process. Also move the assigned VMID and page directory address
into the IB structure.

v3: assign the VMID to all IBs, not just the first one.
v4: use correct pointer for owner

Signed-off-by: Christian König
Reviewed-by: Chunming Zhou
Acked-by: Alex Deucher
Signed-off-by: Alex Deucher

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index f5bac97..0c42a85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -769,8 +769,9 @@ struct amdgpu_ib {
 	uint32_t *ptr;
 	struct amdgpu_fence *fence;
 	struct amdgpu_user_fence *user;
-	bool grabbed_vmid;
 	struct amdgpu_vm *vm;
+	unsigned vm_id;
+	uint64_t vm_pd_addr;
 	struct amdgpu_ctx *ctx;
 	uint32_t gds_base, gds_size;
 	uint32_t gws_base, gws_size;
@@ -877,10 +878,10 @@ struct amdgpu_vm_pt {
 };
 
 struct amdgpu_vm_id {
-	unsigned		id;
-	uint64_t		pd_gpu_addr;
+	struct amdgpu_vm_manager_id	*mgr_id;
+	uint64_t			pd_gpu_addr;
 	/* last flushed PD/PT update */
-	struct fence		*flushed_updates;
+	struct fence			*flushed_updates;
 };
 
 struct amdgpu_vm {
@@ -954,10 +955,11 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 				  struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync, struct fence *fence);
+		      struct amdgpu_sync *sync, struct fence *fence,
+		      unsigned *vm_id, uint64_t *vm_pd_addr);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     struct amdgpu_vm *vm,
-		     struct fence *updates);
+		     unsigned vmid,
+		     uint64_t pd_addr);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 				    struct amdgpu_vm *vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index b5bdd5d..db14a7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -75,6 +75,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	}
 
 	ib->vm = vm;
+	ib->vm_id = 0;
 
 	return 0;
 }
@@ -139,7 +140,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		return -EINVAL;
 	}
 
-	if (vm && !ibs->grabbed_vmid) {
+	if (vm && !ibs->vm_id) {
 		dev_err(adev->dev, "VM IB without ID\n");
 		return -EINVAL;
 	}
@@ -152,10 +153,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 
 	if (vm) {
 		/* do context switch */
-		amdgpu_vm_flush(ring, vm, last_vm_update);
+		amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr);
 
 		if (ring->funcs->emit_gds_switch)
-			amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
+			amdgpu_ring_emit_gds_switch(ring, ib->vm_id,
 						    ib->gds_base, ib->gds_size,
 						    ib->gws_base, ib->gws_size,
 						    ib->oa_base, ib->oa_size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index f29bbb9..90e52f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -105,16 +105,23 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 
 	struct fence *fence = amdgpu_sync_get_fence(&job->sync);
 
-	if (fence == NULL && vm && !job->ibs->grabbed_vmid) {
+	if (fence == NULL && vm && !job->ibs->vm_id) {
 		struct amdgpu_ring *ring = job->ring;
+		unsigned i, vm_id;
+		uint64_t vm_pd_addr;
 		int r;
 
 		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
-				      &job->base.s_fence->base);
+				      &job->base.s_fence->base,
+				      &vm_id, &vm_pd_addr);
 		if (r)
 			DRM_ERROR("Error getting VM ID (%d)\n", r);
-		else
-			job->ibs->grabbed_vmid = true;
+		else {
+			for (i = 0; i < job->num_ibs; ++i) {
+				job->ibs[i].vm_id = vm_id;
+				job->ibs[i].vm_pd_addr = vm_pd_addr;
+			}
+		}
 
 		fence = amdgpu_sync_get_fence(&job->sync);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 264c596..ba90924 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -50,6 +50,9 @@
  * SI supports 16.
  */
 
+/* Special value that no flush is necessary */
+#define AMDGPU_VM_NO_FLUSH (~0ll)
+
 /**
  * amdgpu_vm_num_pde - return the number of page directory entries
  *
@@ -157,50 +160,69 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync, struct fence *fence)
+		      struct amdgpu_sync *sync, struct fence *fence,
+		      unsigned *vm_id, uint64_t *vm_pd_addr)
 {
-	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
+	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_vm_manager_id *id;
+	struct amdgpu_vm_id *id = &vm->ids[ring->idx];
+	struct fence *updates = sync->last_vm_update;
 	int r;
 
 	mutex_lock(&adev->vm_manager.lock);
 
 	/* check if the id is still valid */
-	if (vm_id->id) {
+	if (id->mgr_id) {
+		struct fence *flushed = id->flushed_updates;
+		bool is_later;
 		long owner;
 
-		id = &adev->vm_manager.ids[vm_id->id];
-		owner = atomic_long_read(&id->owner);
-		if (owner == (long)vm) {
-			list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-			trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
+		if (!flushed)
+			is_later = true;
+		else if (!updates)
+			is_later = false;
+		else
+			is_later = fence_is_later(updates, flushed);
+
+		owner = atomic_long_read(&id->mgr_id->owner);
+		if (!is_later && owner == (long)id &&
+		    pd_addr == id->pd_gpu_addr) {
+
+			fence_put(id->mgr_id->active);
+			id->mgr_id->active = fence_get(fence);
+
+			list_move_tail(&id->mgr_id->list,
+				       &adev->vm_manager.ids_lru);
 
-			fence_put(id->active);
-			id->active = fence_get(fence);
+			*vm_id = id->mgr_id - adev->vm_manager.ids;
+			*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
+			trace_amdgpu_vm_grab_id(vm, *vm_id, ring->idx);
 
 			mutex_unlock(&adev->vm_manager.lock);
 			return 0;
 		}
 	}
 
-	/* we definately need to flush */
-	vm_id->pd_gpu_addr = ~0ll;
+	id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru,
+				      struct amdgpu_vm_manager_id,
+				      list);
 
-	id = list_first_entry(&adev->vm_manager.ids_lru,
-			      struct amdgpu_vm_manager_id,
-			      list);
-	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-	atomic_long_set(&id->owner, (long)vm);
+	r = amdgpu_sync_fence(ring->adev, sync, id->mgr_id->active);
+	if (!r) {
+		fence_put(id->mgr_id->active);
+		id->mgr_id->active = fence_get(fence);
 
-	vm_id->id = id - adev->vm_manager.ids;
-	trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
+		fence_put(id->flushed_updates);
+		id->flushed_updates = fence_get(updates);
 
-	r = amdgpu_sync_fence(ring->adev, sync, id->active);
+		id->pd_gpu_addr = pd_addr;
 
-	if (!r) {
-		fence_put(id->active);
-		id->active = fence_get(fence);
+		list_move_tail(&id->mgr_id->list, &adev->vm_manager.ids_lru);
+		atomic_long_set(&id->mgr_id->owner, (long)id);
+
+		*vm_id = id->mgr_id - adev->vm_manager.ids;
+		*vm_pd_addr = pd_addr;
+		trace_amdgpu_vm_grab_id(vm, *vm_id, ring->idx);
 	}
 
 	mutex_unlock(&adev->vm_manager.lock);
@@ -211,35 +233,18 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
- * @vm: vm we want to flush
- * @updates: last vm update that we waited for
+ * @vmid: vmid number to use
+ * @pd_addr: address of the page directory
 *
- * Flush the vm.
+ * Emit a VM flush when it is necessary.
 */
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     struct amdgpu_vm *vm,
-		     struct fence *updates)
+		     unsigned vmid,
+		     uint64_t pd_addr)
 {
-	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
-	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
-	struct fence *flushed_updates = vm_id->flushed_updates;
-	bool is_later;
-
-	if (!flushed_updates)
-		is_later = true;
-	else if (!updates)
-		is_later = false;
-	else
-		is_later = fence_is_later(updates, flushed_updates);
-
-	if (pd_addr != vm_id->pd_gpu_addr || is_later) {
-		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
-		if (is_later) {
-			vm_id->flushed_updates = fence_get(updates);
-			fence_put(flushed_updates);
-		}
-		vm_id->pd_gpu_addr = pd_addr;
-		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
+	if (pd_addr != AMDGPU_VM_NO_FLUSH) {
+		trace_amdgpu_vm_flush(pd_addr, ring->idx, vmid);
+		amdgpu_ring_emit_vm_flush(ring, vmid, pd_addr);
 	}
 }
@@ -1284,7 +1289,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	int i, r;
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		vm->ids[i].id = 0;
+		vm->ids[i].mgr_id = NULL;
 		vm->ids[i].flushed_updates = NULL;
 	}
 	vm->va = RB_ROOT;
@@ -1381,13 +1386,13 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	amdgpu_bo_unref(&vm->page_directory);
 	fence_put(vm->page_directory_fence);
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		unsigned id = vm->ids[i].id;
+		struct amdgpu_vm_id *id = &vm->ids[i];
 
-		atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
-				    (long)vm, 0);
-		fence_put(vm->ids[i].flushed_updates);
+		if (id->mgr_id)
+			atomic_long_cmpxchg(&id->mgr_id->owner,
+					    (long)id, 0);
+		fence_put(id->flushed_updates);
 	}
-
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 675f349..e4e4b2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -212,7 +212,7 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
 				  struct amdgpu_ib *ib)
 {
-	u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+	u32 extra_bits = ib->vm_id & 0xf;
 	u32 next_rptr = ring->wptr + 5;
 
 	while ((next_rptr & 7) != 4)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index bc5bdaf..9cdf595 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2043,8 +2043,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 	else
 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw |
-		   (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+	control |= ib->length_dw | (ib->vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
@@ -2072,8 +2071,7 @@
 
 	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw |
-		   (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+	control |= ib->length_dw | (ib->vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 71d536e..5f67a18 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4619,8 +4619,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 	else
 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw |
-		   (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+	control |= ib->length_dw | (ib->vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
@@ -4649,8 +4648,7 @@
 
 	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw |
-		   (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+	control |= ib->length_dw | (ib->vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 29ec986..dddb8d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -244,7 +244,7 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
 				   struct amdgpu_ib *ib)
 {
-	u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+	u32 vmid = ib->vm_id & 0xf;
 	u32 next_rptr = ring->wptr + 5;
 
 	while ((next_rptr & 7) != 2)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 6f064d7..19e02f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -355,7 +355,7 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
 				   struct amdgpu_ib *ib)
 {
-	u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+	u32 vmid = ib->vm_id & 0xf;
 	u32 next_rptr = ring->wptr + 5;
 
 	while ((next_rptr & 7) != 2)
--
cgit v0.10.2

From 22073fe764c9ff2742c27a8c06f28ef6cd9a56e3 Mon Sep 17 00:00:00 2001
From: Christian König
Date: Fri, 26 Feb 2016 16:18:36 +0100
Subject: drm/amdgpu: trace the pd_addr in vm_grab_id as well

Makes matching it to the flushes much easier.

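With pd_addr now printed by both amdgpu_vm_grab_id and amdgpu_vm_flush
in the same %010Lx format, a grab_id event can be paired with its flush
directly in the trace log.
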
Signed-off-by: Christian König
Reviewed-by: Chunming Zhou
Acked-by: Alex Deucher
Signed-off-by: Alex Deucher

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 9ca3735..26a5f4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -100,21 +100,24 @@ TRACE_EVENT(amdgpu_sched_run_job,
 
 TRACE_EVENT(amdgpu_vm_grab_id,
-	    TP_PROTO(struct amdgpu_vm *vm, unsigned vmid, int ring),
-	    TP_ARGS(vm, vmid, ring),
+	    TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid,
+		     uint64_t pd_addr),
+	    TP_ARGS(vm, ring, vmid, pd_addr),
 
 	    TP_STRUCT__entry(
 			     __field(struct amdgpu_vm *, vm)
-			     __field(u32, vmid)
 			     __field(u32, ring)
+			     __field(u32, vmid)
+			     __field(u64, pd_addr)
 			     ),
 
 	    TP_fast_assign(
 			   __entry->vm = vm;
-			   __entry->vmid = vmid;
 			   __entry->ring = ring;
+			   __entry->vmid = vmid;
+			   __entry->pd_addr = pd_addr;
 			   ),
 
-	    TP_printk("vm=%p, id=%u, ring=%u", __entry->vm, __entry->vmid,
-		      __entry->ring)
+	    TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm,
+		      __entry->ring, __entry->vmid, __entry->pd_addr)
 );
 
 TRACE_EVENT(amdgpu_vm_bo_map,
@@ -231,8 +234,8 @@ TRACE_EVENT(amdgpu_vm_flush,
 		    __entry->ring = ring;
 		    __entry->id = id;
 		    ),
-	    TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
-		      __entry->pd_addr, __entry->ring, __entry->id)
+	    TP_printk("ring=%u, id=%u, pd_addr=%010Lx",
+		      __entry->ring, __entry->id, __entry->pd_addr)
 );
 
 TRACE_EVENT(amdgpu_bo_list_set,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ba90924..d9dc8be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -196,7 +196,8 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 
 			*vm_id = id->mgr_id - adev->vm_manager.ids;
 			*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
-			trace_amdgpu_vm_grab_id(vm, *vm_id, ring->idx);
+			trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id,
+						*vm_pd_addr);
 
 			mutex_unlock(&adev->vm_manager.lock);
 			return 0;
@@ -222,7 +223,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 
 		*vm_id = id->mgr_id - adev->vm_manager.ids;
 		*vm_pd_addr = pd_addr;
-		trace_amdgpu_vm_grab_id(vm, *vm_id, ring->idx);
+		trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
 	}
 
 	mutex_unlock(&adev->vm_manager.lock);
--
cgit v0.10.2

From 6157bd7a1009c2a6944fb3eee8ed2b3dea091fd8 Mon Sep 17 00:00:00 2001
From: Flora Cui
Date: Thu, 3 Mar 2016 12:59:49 +0800
Subject: drm/amdgpu: fix rb bitmap & cu bitmap calculation

Fix some copy-paste typos.

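Derive rb_bitmap_width_per_sh from max_backends_per_se / max_sh_per_se
instead of the hard-coded per-ASIC constants, build the CU bitmask from
max_cu_per_sh rather than the backend count, and clear cu_info before
filling it.
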
Signed-off-by: Flora Cui
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher

diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 7f6d457..60d4493 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -46,9 +46,6 @@
 #define BONAIRE_GB_ADDR_CONFIG_GOLDEN	0x12010001
 #define HAWAII_GB_ADDR_CONFIG_GOLDEN	0x12011003
 
-#define CIK_RB_BITMAP_WIDTH_PER_SH	2
-#define HAWAII_RB_BITMAP_WIDTH_PER_SH	4
-
 #define AMDGPU_NUM_OF_VMIDS	8
 
 #define	PIPEID(x)	((x) << 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 9cdf595..8fb7ebf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1637,18 +1637,16 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
 	int i, j;
 	u32 data;
 	u32 active_rbs = 0;
+	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
+					adev->gfx.config.max_sh_per_se;
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
 			gfx_v7_0_select_se_sh(adev, i, j);
 			data = gfx_v7_0_get_rb_active_bitmap(adev);
-			if (adev->asic_type == CHIP_HAWAII)
-				active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
-						       HAWAII_RB_BITMAP_WIDTH_PER_SH);
-			else
-				active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
-						       CIK_RB_BITMAP_WIDTH_PER_SH);
+			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
+					       rb_bitmap_width_per_sh);
 		}
 	}
 	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
@@ -3820,8 +3818,7 @@ static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
 
-	mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_backends_per_se /
-				       adev->gfx.config.max_sh_per_se);
+	mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
 
 	return (~data) & mask;
 }
@@ -5232,6 +5229,8 @@ int gfx_v7_0_get_cu_info(struct amdgpu_device *adev,
 	if (!adev || !cu_info)
 		return -EINVAL;
 
+	memset(cu_info, 0, sizeof(*cu_info));
+
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 5f67a18..e37378f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -2615,6 +2615,8 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
 	int i, j;
 	u32 data;
 	u32 active_rbs = 0;
+	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
+					adev->gfx.config.max_sh_per_se;
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@@ -2622,7 +2624,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
 			gfx_v8_0_select_se_sh(adev, i, j);
 			data = gfx_v8_0_get_rb_active_bitmap(adev);
 			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
-					       RB_BITMAP_WIDTH_PER_SH);
+					       rb_bitmap_width_per_sh);
 		}
 	}
 	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
@@ -5126,8 +5128,7 @@ static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
 
-	mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se /
-				       adev->gfx.config.max_sh_per_se);
+	mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
 
 	return (~data) & mask;
 }
 
@@ -5141,6 +5142,8 @@ int gfx_v8_0_get_cu_info(struct amdgpu_device *adev,
 	if (!adev || !cu_info)
 		return -EINVAL;
 
+	memset(cu_info, 0, sizeof(*cu_info));
+
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index d98aa9d..ace4997 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -71,8 +71,6 @@
 #define	VMID(x)		((x) << 4)
 #define	QUEUEID(x)	((x) << 8)
 
-#define RB_BITMAP_WIDTH_PER_SH	2
-
 #define MC_SEQ_MISC0__MT__MASK	0xf0000000
 #define MC_SEQ_MISC0__MT__GDDR1	0x10000000
 #define MC_SEQ_MISC0__MT__DDR2	0x20000000
--
cgit v0.10.2
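
For readers tracing the first patch in this series ("fix off by one in
rb rework"): the removed loop counts right-shifts until the value hits
zero, which yields the index of the highest set bit, not the number of
set bits; hweight32() is the kernel's 32-bit population count. A
standalone userspace sketch (plain C, with __builtin_popcount()
standing in for hweight32()) shows the off-by-one:

#include <stdio.h>
#include <stdint.h>

/* The removed loop: shifts right until zero, counting iterations.
 * For a non-zero value this yields the index of the highest set bit. */
static unsigned count_rbs_old(uint32_t active_rbs)
{
	uint32_t tmp = active_rbs;
	unsigned num_rbs = 0;

	while (tmp >>= 1)
		num_rbs++;
	return num_rbs;
}

/* What hweight32() computes: the number of set bits. */
static unsigned count_rbs_new(uint32_t active_rbs)
{
	return (unsigned)__builtin_popcount(active_rbs);
}

int main(void)
{
	uint32_t active_rbs = 0xf;	/* four render backends active */

	printf("old: %u, new: %u\n",
	       count_rbs_old(active_rbs),	/* prints 3: off by one */
	       count_rbs_new(active_rbs));	/* prints 4: correct */
	return 0;
}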
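
The amdgpu_bo_list_set() fix is an instance of the usual partial-unwind
idiom: on failure at entry i, drop the reference just taken for that
entry, then walk back over entries 0..i-1 before freeing the array. A
minimal sketch of the same shape, with hypothetical get_ref()/put_ref()
helpers and an owner field standing in for amdgpu_bo_ref()/
amdgpu_bo_unref() and the usermm check:

#include <errno.h>
#include <stdlib.h>

struct object {
	int refs;
	int owner;	/* stands in for the usermm check in the patch */
};

/* Hypothetical stand-ins for amdgpu_bo_ref()/amdgpu_bo_unref(). */
static void get_ref(struct object *obj) { obj->refs++; }
static void put_ref(struct object *obj) { obj->refs--; }

static int set_list(struct object **in, unsigned num, int current_owner,
		    struct object ***out)
{
	struct object **array;
	unsigned i;
	int r;

	array = calloc(num, sizeof(*array));
	if (!array)
		return -ENOMEM;

	for (i = 0; i < num; ++i) {
		get_ref(in[i]);
		array[i] = in[i];

		if (in[i]->owner != current_owner) {
			put_ref(in[i]);		/* drop the ref just taken... */
			r = -EPERM;
			goto error_free;	/* ...then unwind the rest */
		}
	}

	*out = array;
	return 0;

error_free:
	while (i--)		/* entries 0..i-1 still hold a reference */
		put_ref(array[i]);
	free(array);
	return r;
}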
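
And the get_mfd_cell_dev() cleanup in isolation: one bounded snprintf()
replaces the sprintf()/strcpy()/strcat() sequence, so output that would
exceed the buffer is truncated instead of overflowing it. The device
name below is only an example, not taken from the patch:

#include <stdio.h>

int main(void)
{
	char auto_dev_name[25];
	const char *device_name = "designware-i2s";	/* example name only */
	int r = 1;

	/* One bounded call; never writes past sizeof(auto_dev_name). */
	snprintf(auto_dev_name, sizeof(auto_dev_name),
		 "%s.%d.auto", device_name, r);
	printf("%s\n", auto_dev_name);	/* -> "designware-i2s.1.auto" */
	return 0;
}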