author     Linus Torvalds <torvalds@linux-foundation.org>  2016-08-02 01:44:08 (GMT)
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-08-02 01:44:08 (GMT)
commit     731c7d3a205ba89b475b2aa71b5f13dd6ae3de56 (patch)
tree       d2b9c3e0a98b94dfc3e4e60e35622c0143ef4ed4 /drivers/gpu/drm/amd
parent     77a87824ed676ca8ff8482e4157d3adb284fd381 (diff)
parent     753e7c8cbd8c503b962294303c7b5e9ea8513443 (diff)
download   linux-731c7d3a205ba89b475b2aa71b5f13dd6ae3de56.tar.xz
Merge tag 'drm-for-v4.8' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"This is the main drm pull request for 4.8.
I'm down with a cold at the moment, so hopefully this isn't in too bad
a state; I finished pulling stuff last week mostly (nouveau fixes just
went in today), so only this message should be influenced by illness.
Apologies to anyone whose major feature I missed :-)
Core:
Lockless GEM BO freeing
Non-blocking atomic work
Documentation changes (rst/sphinx)
Prep for new fencing changes
Simple display helpers
Master/auth changes
Register/unregister rework
Loads of trivial patches/fixes.
New stuff:
ARM Mali display driver (not the 3D chip)
sii902x RGB->HDMI bridge
Panel:
Support for new panels
Improved backlight support
Bridge:
Convert ADV7511 to bridge driver
ADV7533 support
TC358767 (DSI/DPI to eDP) encoder chip support
i915:
BXT support enabled by default
GVT-g infrastructure
GuC command submission and fixes
BXT workarounds
SKL/KBL workarounds
Demidlayering device registration
Thundering herd fixes
Missing pci ids
Atomic updates
amdgpu/radeon:
ATPX improvements for better dGPU power control on PX systems
New power features for CZ/BR/ST
Pipelined BO moves and evictions in TTM
GPU scheduler improvements
GPU reset improvements
Overclocking on dGPUs with amdgpu
Polaris power management enabled
nouveau:
GK20A/GM20B volt and clock improvements.
Initial support for GP100/GP104 GPUs; GP104 will not yet support
acceleration because NVIDIA has not yet released firmware for it.
exynos:
Exynos5433 SoC with IOMMU support.
vc4:
Shader validation for branching
imx-drm:
Atomic mode setting conversion
Reworked DMFC FIFO allocation
External bridge support
analogix-dp:
RK3399 eDP support
Lots of fixes.
rockchip:
Lots of small fixes.
msm:
DT bindings cleanups
Shrinker and madvise support
ASoC HDMI codec support
tegra:
Host1x driver cleanups
SOR reworking for DP support
Runtime PM support
omapdrm:
PLL enhancements
Header refactoring
Gamma table support
arcpgu:
Simulator support
virtio-gpu:
Atomic modesetting fixes.
rcar-du:
Misc fixes.
mediatek:
MT8173 HDMI support
sti:
ASoC HDMI codec support
Minor fixes
fsl-dcu:
Suspend/resume support
Bridge support
amdkfd:
Minor fixes.
etnaviv:
Enable GPU clock gating
hisilicon:
Vblank and other fixes"
* tag 'drm-for-v4.8' of git://people.freedesktop.org/~airlied/linux: (1575 commits)
drm/nouveau/gr/nv3x: fix instobj write offsets in gr setup
drm/nouveau/acpi: fix lockup with PCIe runtime PM
drm/nouveau/acpi: check for function 0x1B before using it
drm/nouveau/acpi: return supported DSM functions
drm/nouveau/acpi: ensure matching ACPI handle and supported functions
drm/nouveau/fbcon: fix font width not divisible by 8
drm/amd/powerplay: remove enable_clock_power_gatings_tasks from initialize and resume events
drm/amd/powerplay: move clockgating to after ungating power in pp for uvd/vce
drm/amdgpu: add query device id and revision id into system info entry at CGS
drm/amdgpu: add new definition in bif header
drm/amd/powerplay: rename smum header guards
drm/amdgpu: enable UVD context buffer for older HW
drm/amdgpu: fix default UVD context size
drm/amdgpu: fix incorrect type of info_id
drm/amdgpu: make amdgpu_cgs_call_acpi_method as static
drm/amdgpu: comment out unused defaults_staturn_pro static const structure to fix the build
drm/amdgpu: enable UVD VM only on polaris
drm/amdgpu: increase timeout of IB test
drm/amdgpu: add destroy session when generate VCE destroy msg.
drm/amd: fix deadlock of job_list_lock V2
...
Diffstat (limited to 'drivers/gpu/drm/amd')
113 files changed, 5132 insertions(+), 2280 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index e055d5be..8ebc5f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -85,8 +85,12 @@ extern int amdgpu_vm_debug; extern int amdgpu_sched_jobs; extern int amdgpu_sched_hw_submission; extern int amdgpu_powerplay; +extern int amdgpu_powercontainment; extern unsigned amdgpu_pcie_gen_cap; extern unsigned amdgpu_pcie_lane_cap; +extern unsigned amdgpu_cg_mask; +extern unsigned amdgpu_pg_mask; +extern char *amdgpu_disable_cu; #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ @@ -183,6 +187,10 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev, int amdgpu_set_powergating_state(struct amdgpu_device *adev, enum amd_ip_block_type block_type, enum amd_powergating_state state); +int amdgpu_wait_for_idle(struct amdgpu_device *adev, + enum amd_ip_block_type block_type); +bool amdgpu_is_idle(struct amdgpu_device *adev, + enum amd_ip_block_type block_type); struct amdgpu_ip_block_version { enum amd_ip_block_type type; @@ -298,13 +306,16 @@ struct amdgpu_ring_funcs { uint32_t oa_base, uint32_t oa_size); /* testing functions */ int (*test_ring)(struct amdgpu_ring *ring); - int (*test_ib)(struct amdgpu_ring *ring); + int (*test_ib)(struct amdgpu_ring *ring, long timeout); /* insert NOP packets */ void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count); /* pad the indirect buffer to the necessary number of dw */ void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib); unsigned (*init_cond_exec)(struct amdgpu_ring *ring); void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset); + /* note usage for clock and power gating */ + void (*begin_use)(struct amdgpu_ring *ring); + void (*end_use)(struct amdgpu_ring *ring); }; /* @@ -594,11 +605,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct reservation_object *resv, void *owner); -bool amdgpu_sync_is_idle(struct amdgpu_sync *sync); -int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src, - struct fence *fence); +struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, + struct amdgpu_ring *ring); struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); -int amdgpu_sync_wait(struct amdgpu_sync *sync); void amdgpu_sync_free(struct amdgpu_sync *sync); int amdgpu_sync_init(void); void amdgpu_sync_fini(void); @@ -754,12 +763,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, struct amdgpu_job **job); +void amdgpu_job_free_resources(struct amdgpu_job *job); void amdgpu_job_free(struct amdgpu_job *job); -void amdgpu_job_free_func(struct kref *refcount); int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, struct amd_sched_entity *entity, void *owner, struct fence **f); -void amdgpu_job_timeout_func(struct work_struct *work); struct amdgpu_ring { struct amdgpu_device *adev; @@ -767,12 +775,9 @@ struct amdgpu_ring { struct amdgpu_fence_driver fence_drv; struct amd_gpu_scheduler sched; - spinlock_t fence_lock; struct amdgpu_bo *ring_obj; volatile uint32_t *ring; unsigned rptr_offs; - u64 next_rptr_gpu_addr; - volatile u32 *next_rptr_cpu_addr; unsigned wptr; unsigned wptr_old; unsigned ring_size; @@ -791,14 +796,16 @@ struct amdgpu_ring { u32 doorbell_index; bool use_doorbell; unsigned wptr_offs; - unsigned next_rptr_offs; unsigned fence_offs; uint64_t current_ctx; enum 
amdgpu_ring_type type; char name[16]; unsigned cond_exe_offs; - u64 cond_exe_gpu_addr; - volatile u32 *cond_exe_cpu_addr; + u64 cond_exe_gpu_addr; + volatile u32 *cond_exe_cpu_addr; +#if defined(CONFIG_DEBUG_FS) + struct dentry *ent; +#endif }; /* @@ -861,6 +868,7 @@ struct amdgpu_vm { struct amdgpu_bo *page_directory; unsigned max_pde_used; struct fence *page_directory_fence; + uint64_t last_eviction_counter; /* array of page tables, one for each page directory entry */ struct amdgpu_vm_pt *page_tables; @@ -883,13 +891,14 @@ struct amdgpu_vm_id { struct fence *first; struct amdgpu_sync active; struct fence *last_flush; - struct amdgpu_ring *last_user; atomic64_t owner; uint64_t pd_gpu_addr; /* last flushed PD/PT update */ struct fence *flushed_updates; + uint32_t current_gpu_reset_count; + uint32_t gds_base; uint32_t gds_size; uint32_t gws_base; @@ -905,6 +914,10 @@ struct amdgpu_vm_manager { struct list_head ids_lru; struct amdgpu_vm_id ids[AMDGPU_NUM_VM]; + /* Handling of VM fences */ + u64 fence_context; + unsigned seqno[AMDGPU_MAX_RINGS]; + uint32_t max_pfn; /* vram base address for page table entry */ u64 vram_base_offset; @@ -926,17 +939,14 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, struct amdgpu_bo_list_entry *entry); -void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates); +void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct list_head *duplicates); void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_sync *sync, struct fence *fence, - unsigned *vm_id, uint64_t *vm_pd_addr); -int amdgpu_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr, - uint32_t gds_base, uint32_t gds_size, - uint32_t gws_base, uint32_t gws_size, - uint32_t oa_base, uint32_t oa_size); + struct amdgpu_job *job); +int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, @@ -1142,6 +1152,12 @@ struct amdgpu_cu_info { uint32_t bitmap[4][4]; }; +struct amdgpu_gfx_funcs { + /* get the gpu clock counter */ + uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); + void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance); +}; + struct amdgpu_gfx { struct mutex gpu_clock_mutex; struct amdgpu_gca_config config; @@ -1178,6 +1194,7 @@ struct amdgpu_gfx { /* ce ram size*/ unsigned ce_ram_size; struct amdgpu_cu_info cu_info; + const struct amdgpu_gfx_funcs *funcs; }; int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, @@ -1195,10 +1212,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); void amdgpu_ring_commit(struct amdgpu_ring *ring); void amdgpu_ring_undo(struct amdgpu_ring *ring); -unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, - uint32_t **data); -int amdgpu_ring_restore(struct amdgpu_ring *ring, - unsigned size, uint32_t *data); int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned ring_size, u32 nop, u32 align_mask, struct amdgpu_irq_src *irq_src, unsigned irq_type, @@ -1250,6 +1263,7 @@ struct amdgpu_job { uint32_t 
num_ibs; void *owner; uint64_t ctx; + bool vm_needs_flush; unsigned vm_id; uint64_t vm_pd_addr; uint32_t gds_base, gds_size; @@ -1257,8 +1271,7 @@ struct amdgpu_job { uint32_t oa_base, oa_size; /* user fence handling */ - struct amdgpu_bo *uf_bo; - uint32_t uf_offset; + uint64_t uf_addr; uint64_t uf_sequence; }; @@ -1560,6 +1573,12 @@ struct amdgpu_dpm_funcs { u32 (*get_fan_control_mode)(struct amdgpu_device *adev); int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed); int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed); + int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask); + int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf); + int (*get_sclk_od)(struct amdgpu_device *adev); + int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value); + int (*get_mclk_od)(struct amdgpu_device *adev); + int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value); }; struct amdgpu_dpm { @@ -1662,6 +1681,7 @@ struct amdgpu_uvd { struct amdgpu_ring ring; struct amdgpu_irq_src irq; bool address_64_bit; + bool use_ctx_buf; struct amd_sched_entity entity; }; @@ -1683,6 +1703,7 @@ struct amdgpu_vce { struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES]; uint32_t img_size[AMDGPU_MAX_VCE_HANDLES]; struct delayed_work idle_work; + struct mutex idle_mutex; const struct firmware *fw; /* VCE firmware */ struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; struct amdgpu_irq_src irq; @@ -1767,6 +1788,8 @@ int amdgpu_debugfs_init(struct drm_minor *minor); void amdgpu_debugfs_cleanup(struct drm_minor *minor); #endif +int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev); + /* * amdgpu smumgr functions */ @@ -1811,12 +1834,8 @@ struct amdgpu_asic_funcs { u32 sh_num, u32 reg_offset, u32 *value); void (*set_vga_state)(struct amdgpu_device *adev, bool state); int (*reset)(struct amdgpu_device *adev); - /* wait for mc_idle */ - int (*wait_for_mc_idle)(struct amdgpu_device *adev); /* get the reference clock */ u32 (*get_xclk)(struct amdgpu_device *adev); - /* get the gpu clock counter */ - uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); /* MM block clocks */ int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); @@ -2003,6 +2022,10 @@ struct amdgpu_device { spinlock_t didt_idx_lock; amdgpu_rreg_t didt_rreg; amdgpu_wreg_t didt_wreg; + /* protects concurrent gc_cac register access */ + spinlock_t gc_cac_idx_lock; + amdgpu_rreg_t gc_cac_rreg; + amdgpu_wreg_t gc_cac_wreg; /* protects concurrent ENDPOINT (audio) register access */ spinlock_t audio_endpt_idx_lock; amdgpu_block_rreg_t audio_endpt_rreg; @@ -2028,6 +2051,7 @@ struct amdgpu_device { atomic64_t vram_vis_usage; atomic64_t gtt_usage; atomic64_t num_bytes_moved; + atomic64_t num_evictions; atomic_t gpu_reset_counter; /* display */ @@ -2038,7 +2062,7 @@ struct amdgpu_device { struct amdgpu_irq_src hpd_irq; /* rings */ - unsigned fence_context; + u64 fence_context; unsigned num_rings; struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; bool ib_pool_ready; @@ -2131,6 +2155,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v); #define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v)) #define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg)) #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v)) +#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg)) +#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v)) #define 
RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg)) #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v)) #define WREG32_P(reg, val, mask) \ @@ -2206,12 +2232,10 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) */ #define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state)) #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev)) -#define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev)) #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) #define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev))) -#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) @@ -2222,7 +2246,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) -#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r)) +#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t)) #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) @@ -2264,6 +2288,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps)) #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev)) #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) +#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) +#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) #define amdgpu_dpm_get_temperature(adev) \ ((adev)->pp_enabled ? 
\ @@ -2342,6 +2368,18 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_dpm_force_clock_level(adev, type, level) \ (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level) +#define amdgpu_dpm_get_sclk_od(adev) \ + (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle) + +#define amdgpu_dpm_set_sclk_od(adev, value) \ + (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value) + +#define amdgpu_dpm_get_mclk_od(adev) \ + ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_set_mclk_od(adev, value) \ + ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value)) + #define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \ (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output)) @@ -2383,9 +2421,13 @@ bool amdgpu_device_is_px(struct drm_device *dev); #if defined(CONFIG_VGA_SWITCHEROO) void amdgpu_register_atpx_handler(void); void amdgpu_unregister_atpx_handler(void); +bool amdgpu_has_atpx_dgpu_power_cntl(void); +bool amdgpu_is_atpx_hybrid(void); #else static inline void amdgpu_register_atpx_handler(void) {} static inline void amdgpu_unregister_atpx_handler(void) {} +static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; } +static inline bool amdgpu_is_atpx_hybrid(void) { return false; } #endif /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 32809f7..d080d08 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -240,8 +240,8 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd) { struct amdgpu_device *rdev = (struct amdgpu_device *)kgd; - if (rdev->asic_funcs->get_gpu_clock_counter) - return rdev->asic_funcs->get_gpu_clock_counter(rdev); + if (rdev->gfx.funcs->get_gpu_clock_counter) + return rdev->gfx.funcs->get_gpu_clock_counter(rdev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 9df1bcb..9831753 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -551,28 +551,19 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) le16_to_cpu(firmware_info->info.usReferenceClock); ppll->reference_div = 0; - if (crev < 2) - ppll->pll_out_min = - le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); - else - ppll->pll_out_min = - le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output); + ppll->pll_out_min = + le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output); ppll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); - if (crev >= 4) { - ppll->lcd_pll_out_min = - le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; - if (ppll->lcd_pll_out_min == 0) - ppll->lcd_pll_out_min = ppll->pll_out_min; - ppll->lcd_pll_out_max = - le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100; - if (ppll->lcd_pll_out_max == 0) - ppll->lcd_pll_out_max = ppll->pll_out_max; - } else { + ppll->lcd_pll_out_min = + le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; + if (ppll->lcd_pll_out_min == 0) ppll->lcd_pll_out_min = ppll->pll_out_min; + ppll->lcd_pll_out_max = + le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100; + if (ppll->lcd_pll_out_max == 0) ppll->lcd_pll_out_max = ppll->pll_out_max; - } if (ppll->pll_out_min == 0) ppll->pll_out_min = 
64800; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 35a1248..49de926 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -10,6 +10,7 @@ #include <linux/slab.h> #include <linux/acpi.h> #include <linux/pci.h> +#include <linux/delay.h> #include "amd_acpi.h" @@ -27,6 +28,7 @@ struct amdgpu_atpx_functions { struct amdgpu_atpx { acpi_handle handle; struct amdgpu_atpx_functions functions; + bool is_hybrid; }; static struct amdgpu_atpx_priv { @@ -63,6 +65,14 @@ bool amdgpu_has_atpx(void) { return amdgpu_atpx_priv.atpx_detected; } +bool amdgpu_has_atpx_dgpu_power_cntl(void) { + return amdgpu_atpx_priv.atpx.functions.power_cntl; +} + +bool amdgpu_is_atpx_hybrid(void) { + return amdgpu_atpx_priv.atpx.is_hybrid; +} + /** * amdgpu_atpx_call - call an ATPX method * @@ -142,18 +152,12 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas */ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) { - /* make sure required functions are enabled */ - /* dGPU power control is required */ - if (atpx->functions.power_cntl == false) { - printk("ATPX dGPU power cntl not present, forcing\n"); - atpx->functions.power_cntl = true; - } + u32 valid_bits = 0; if (atpx->functions.px_params) { union acpi_object *info; struct atpx_px_params output; size_t size; - u32 valid_bits; info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL); if (!info) @@ -172,19 +176,43 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) memcpy(&output, info->buffer.pointer, size); valid_bits = output.flags & output.valid_flags; - /* if separate mux flag is set, mux controls are required */ - if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) { - atpx->functions.i2c_mux_cntl = true; - atpx->functions.disp_mux_cntl = true; - } - /* if any outputs are muxed, mux controls are required */ - if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED | - ATPX_TV_SIGNAL_MUXED | - ATPX_DFP_SIGNAL_MUXED)) - atpx->functions.disp_mux_cntl = true; kfree(info); } + + /* if separate mux flag is set, mux controls are required */ + if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) { + atpx->functions.i2c_mux_cntl = true; + atpx->functions.disp_mux_cntl = true; + } + /* if any outputs are muxed, mux controls are required */ + if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED | + ATPX_TV_SIGNAL_MUXED | + ATPX_DFP_SIGNAL_MUXED)) + atpx->functions.disp_mux_cntl = true; + + + /* some bioses set these bits rather than flagging power_cntl as supported */ + if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED | + ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED)) + atpx->functions.power_cntl = true; + + atpx->is_hybrid = false; + if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { + printk("ATPX Hybrid Graphics\n"); +#if 1 + /* This is a temporary hack until the D3 cold support + * makes it upstream. The ATPX power_control method seems + * to still work on even if the system should be using + * the new standardized hybrid D3 cold ACPI interface. 
+ */ + atpx->functions.power_cntl = true; +#else + atpx->functions.power_cntl = false; +#endif + atpx->is_hybrid = true; + } + return 0; } @@ -259,6 +287,10 @@ static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state) if (!info) return -EIO; kfree(info); + + /* 200ms delay is required after off */ + if (state == 0) + msleep(200); } return 0; } @@ -507,7 +539,6 @@ static int amdgpu_atpx_get_client_id(struct pci_dev *pdev) static const struct vga_switcheroo_handler amdgpu_atpx_handler = { .switchto = amdgpu_atpx_switchto, .power_state = amdgpu_atpx_power_state, - .init = amdgpu_atpx_init, .get_client_id = amdgpu_atpx_get_client_id, }; @@ -542,6 +573,7 @@ static bool amdgpu_atpx_detect(void) printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n", acpi_method_name); amdgpu_atpx_priv.atpx_detected = true; + amdgpu_atpx_init(); return true; } return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 99ca75b..2b6afe1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -352,22 +352,22 @@ bool amdgpu_get_bios(struct amdgpu_device *adev) uint16_t tmp, bios_header_start; r = amdgpu_atrm_get_bios(adev); - if (r == false) + if (!r) r = amdgpu_acpi_vfct_bios(adev); - if (r == false) + if (!r) r = igp_read_bios_from_vram(adev); - if (r == false) + if (!r) r = amdgpu_read_bios(adev); - if (r == false) { + if (!r) { r = amdgpu_read_bios_from_rom(adev); } - if (r == false) { + if (!r) { r = amdgpu_read_disabled_bios(adev); } - if (r == false) { + if (!r) { r = amdgpu_read_platform_bios(adev); } - if (r == false || adev->bios == NULL) { + if (!r || adev->bios == NULL) { DRM_ERROR("Unable to locate a BIOS ROM\n"); adev->bios = NULL; return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 823bf5e..651115d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -94,6 +94,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, unsigned last_entry = 0, first_userptr = num_entries; unsigned i; int r; + unsigned long total_size = 0; array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry)); if (!array) @@ -140,6 +141,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA) oa_obj = entry->robj; + total_size += amdgpu_bo_size(entry->robj); trace_amdgpu_bo_list_set(list, entry->robj); } @@ -155,6 +157,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, list->array = array; list->num_entries = num_entries; + trace_amdgpu_cs_bo_status(list->num_entries, total_size); return 0; error_free: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index cf6f49f..bc0440f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -312,6 +312,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device, return RREG32_UVD_CTX(index); case CGS_IND_REG__DIDT: return RREG32_DIDT(index); + case CGS_IND_REG_GC_CAC: + return RREG32_GC_CAC(index); case CGS_IND_REG__AUDIO_ENDPT: DRM_ERROR("audio endpt register access not implemented.\n"); return 0; @@ -336,6 +338,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device, return WREG32_UVD_CTX(index, value); case CGS_IND_REG__DIDT: return WREG32_DIDT(index, value); + case CGS_IND_REG_GC_CAC: + return 
WREG32_GC_CAC(index, value); case CGS_IND_REG__AUDIO_ENDPT: DRM_ERROR("audio endpt register access not implemented.\n"); return; @@ -748,6 +752,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, if (!adev->pm.fw) { switch (adev->asic_type) { + case CHIP_TOPAZ: + strcpy(fw_name, "amdgpu/topaz_smc.bin"); + break; case CHIP_TONGA: strcpy(fw_name, "amdgpu/tonga_smc.bin"); break; @@ -787,6 +794,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, } hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; + amdgpu_ucode_print_smc_hdr(&hdr->header); adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); @@ -795,13 +803,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, info->version = adev->pm.fw_version; info->image_size = ucode_size; + info->ucode_start_address = ucode_start_address; info->kptr = (void *)src; } return 0; } static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device, - struct cgs_system_info *sys_info) + struct cgs_system_info *sys_info) { CGS_FUNC_ADEV; @@ -821,6 +830,12 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device, case CGS_SYSTEM_INFO_PCIE_MLW: sys_info->value = adev->pm.pcie_mlw_mask; break; + case CGS_SYSTEM_INFO_PCIE_DEV: + sys_info->value = adev->pdev->device; + break; + case CGS_SYSTEM_INFO_PCIE_REV: + sys_info->value = adev->pdev->revision; + break; case CGS_SYSTEM_INFO_CG_FLAGS: sys_info->value = adev->cg_flags; break; @@ -830,6 +845,9 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device, case CGS_SYSTEM_INFO_GFX_CU_INFO: sys_info->value = adev->gfx.cu_info.number; break; + case CGS_SYSTEM_INFO_GFX_SE_INFO: + sys_info->value = adev->gfx.config.max_shader_engines; + break; default: return -ENODEV; } @@ -903,14 +921,12 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, acpi_handle handle; struct acpi_object_list input; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *params = NULL; - union acpi_object *obj = NULL; + union acpi_object *params, *obj; uint8_t name[5] = {'\0'}; - struct cgs_acpi_method_argument *argument = NULL; + struct cgs_acpi_method_argument *argument; uint32_t i, count; acpi_status status; - int result = 0; - uint32_t func_no = 0xFFFFFFFF; + int result; handle = ACPI_HANDLE(&adev->pdev->dev); if (!handle) @@ -927,7 +943,6 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, if (info->pinput_argument == NULL) return -EINVAL; argument = info->pinput_argument; - func_no = argument->value; for (i = 0; i < info->input_count; i++) { if (((argument->type == ACPI_TYPE_STRING) || (argument->type == ACPI_TYPE_BUFFER)) && @@ -972,11 +987,11 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, params->integer.value = argument->value; break; case ACPI_TYPE_STRING: - params->string.length = argument->method_length; + params->string.length = argument->data_length; params->string.pointer = argument->pointer; break; case ACPI_TYPE_BUFFER: - params->buffer.length = argument->method_length; + params->buffer.length = argument->data_length; params->buffer.pointer = argument->pointer; break; default: @@ -996,7 +1011,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, if (ACPI_FAILURE(status)) { result = -EIO; - goto error; + goto free_input; } /* return the output info */ @@ -1006,7 +1021,7 @@ static 
int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, if ((obj->type != ACPI_TYPE_PACKAGE) || (obj->package.count != count)) { result = -EIO; - goto error; + goto free_obj; } params = obj->package.elements; } else @@ -1014,13 +1029,13 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, if (params == NULL) { result = -EIO; - goto error; + goto free_obj; } for (i = 0; i < count; i++) { if (argument->type != params->type) { result = -EIO; - goto error; + goto free_obj; } switch (params->type) { case ACPI_TYPE_INTEGER: @@ -1030,7 +1045,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, if ((params->string.length != argument->data_length) || (params->string.pointer == NULL)) { result = -EIO; - goto error; + goto free_obj; } strncpy(argument->pointer, params->string.pointer, @@ -1039,7 +1054,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, case ACPI_TYPE_BUFFER: if (params->buffer.pointer == NULL) { result = -EIO; - goto error; + goto free_obj; } memcpy(argument->pointer, params->buffer.pointer, @@ -1052,9 +1067,10 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, params++; } -error: - if (obj != NULL) - kfree(obj); + result = 0; +free_obj: + kfree(obj); +free_input: kfree((void *)input.pointer); return result; } @@ -1066,7 +1082,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, } #endif -int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device, +static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device, uint32_t acpi_method, uint32_t acpi_function, void *pinput, void *poutput, @@ -1079,17 +1095,14 @@ int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device, struct cgs_acpi_method_info info = {0}; acpi_input[0].type = CGS_ACPI_TYPE_INTEGER; - acpi_input[0].method_length = sizeof(uint32_t); acpi_input[0].data_length = sizeof(uint32_t); acpi_input[0].value = acpi_function; acpi_input[1].type = CGS_ACPI_TYPE_BUFFER; - acpi_input[1].method_length = CGS_ACPI_MAX_BUFFER_SIZE; acpi_input[1].data_length = input_size; acpi_input[1].pointer = pinput; acpi_output.type = CGS_ACPI_TYPE_BUFFER; - acpi_output.method_length = CGS_ACPI_MAX_BUFFER_SIZE; acpi_output.data_length = output_size; acpi_output.pointer = poutput; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index cb07da4..ff0b55a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -1690,7 +1690,6 @@ amdgpu_connector_add(struct amdgpu_device *adev, DRM_MODE_SCALE_NONE); /* no HPD on analog connectors */ amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE; - connector->polled = DRM_CONNECTOR_POLL_CONNECT; connector->interlace_allowed = true; connector->doublescan_allowed = true; break; @@ -1893,8 +1892,10 @@ amdgpu_connector_add(struct amdgpu_device *adev, } if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) { - if (i2c_bus->valid) - connector->polled = DRM_CONNECTOR_POLL_CONNECT; + if (i2c_bus->valid) { + connector->polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; + } } else connector->polled = DRM_CONNECTOR_POLL_HPD; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 9bc8f1d..0307ff5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -216,11 +216,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) if (ret) goto free_all_kdata; - if (p->uf_entry.robj) { - 
p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj); - p->job->uf_offset = uf_offset; - } - + if (p->uf_entry.robj) + p->job->uf_addr = uf_offset; kfree(chunk_array); return 0; @@ -459,7 +456,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, list_splice(&need_pages, &p->validated); } - amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates); + amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates); p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev); p->bytes_moved = 0; @@ -472,6 +469,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, if (r) goto error_validate; + fpriv->vm.last_eviction_counter = + atomic64_read(&p->adev->num_evictions); + if (p->bo_list) { struct amdgpu_bo *gds = p->bo_list->gds_obj; struct amdgpu_bo *gws = p->bo_list->gws_obj; @@ -499,6 +499,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, } } + if (p->uf_entry.robj) + p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj); + error_validate: if (r) { amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm); @@ -653,18 +656,21 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, /* Only for UVD/VCE VM emulation */ if (ring->funcs->parse_cs) { + p->job->vm = NULL; for (i = 0; i < p->job->num_ibs; i++) { r = amdgpu_ring_parse_cs(ring, p, i); if (r) return r; } - } + } else { + p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); - r = amdgpu_bo_vm_update_pte(p, vm); - if (!r) - amdgpu_cs_sync_rings(p); + r = amdgpu_bo_vm_update_pte(p, vm); + if (r) + return r; + } - return r; + return amdgpu_cs_sync_rings(p); } static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r) @@ -761,7 +767,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, } /* UVD & VCE fw doesn't support user fences */ - if (parser->job->uf_bo && ( + if (parser->job->uf_addr && ( parser->job->ring->type == AMDGPU_RING_TYPE_UVD || parser->job->ring->type == AMDGPU_RING_TYPE_VCE)) return -EINVAL; @@ -830,17 +836,13 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, { struct amdgpu_ring *ring = p->job->ring; struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity; - struct fence *fence; struct amdgpu_job *job; int r; job = p->job; p->job = NULL; - r = amd_sched_job_init(&job->base, &ring->sched, - entity, amdgpu_job_timeout_func, - amdgpu_job_free_func, - p->filp, &fence); + r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp); if (r) { amdgpu_job_free(job); return r; @@ -848,9 +850,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, job->owner = p->filp; job->ctx = entity->fence_context; - p->fence = fence_get(fence); - cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence); + p->fence = fence_get(&job->base.s_fence->finished); + cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence); job->uf_sequence = cs->out.handle; + amdgpu_job_free_resources(job); trace_amdgpu_cs_ioctl(job); amd_sched_entity_push_job(&job->base); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6e92008..df7ab245 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -25,6 +25,7 @@ * Alex Deucher * Jerome Glisse */ +#include <linux/kthread.h> #include <linux/console.h> #include <linux/slab.h> #include <linux/debugfs.h> @@ -35,6 +36,7 @@ #include <linux/vga_switcheroo.h> #include <linux/efi.h> #include "amdgpu.h" +#include "amdgpu_trace.h" #include "amdgpu_i2c.h" #include "atom.h" #include "amdgpu_atombios.h" @@ -79,24 +81,27 @@ bool 
amdgpu_device_is_px(struct drm_device *dev) uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, bool always_indirect) { + uint32_t ret; + if ((reg * 4) < adev->rmmio_size && !always_indirect) - return readl(((void __iomem *)adev->rmmio) + (reg * 4)); + ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); else { unsigned long flags; - uint32_t ret; spin_lock_irqsave(&adev->mmio_idx_lock, flags); writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); - - return ret; } + trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret); + return ret; } void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, bool always_indirect) { + trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); + if ((reg * 4) < adev->rmmio_size && !always_indirect) writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); else { @@ -1070,11 +1075,14 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev, int i, r = 0; for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; if (adev->ip_blocks[i].type == block_type) { r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, state); if (r) return r; + break; } } return r; @@ -1087,16 +1095,53 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev, int i, r = 0; for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; if (adev->ip_blocks[i].type == block_type) { r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev, state); if (r) return r; + break; } } return r; } +int amdgpu_wait_for_idle(struct amdgpu_device *adev, + enum amd_ip_block_type block_type) +{ + int i, r; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + if (adev->ip_blocks[i].type == block_type) { + r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev); + if (r) + return r; + break; + } + } + return 0; + +} + +bool amdgpu_is_idle(struct amdgpu_device *adev, + enum amd_ip_block_type block_type) +{ + int i; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + if (adev->ip_blocks[i].type == block_type) + return adev->ip_blocks[i].funcs->is_idle((void *)adev); + } + return true; + +} + const struct amdgpu_ip_block_version * amdgpu_get_ip_block( struct amdgpu_device *adev, enum amd_ip_block_type type) @@ -1209,6 +1254,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev) } } + adev->cg_flags &= amdgpu_cg_mask; + adev->pg_flags &= amdgpu_pg_mask; + return 0; } @@ -1440,9 +1488,12 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; adev->didt_rreg = &amdgpu_invalid_rreg; adev->didt_wreg = &amdgpu_invalid_wreg; + adev->gc_cac_rreg = &amdgpu_invalid_rreg; + adev->gc_cac_wreg = &amdgpu_invalid_wreg; adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; + DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); @@ -1467,6 +1518,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, spin_lock_init(&adev->pcie_idx_lock); spin_lock_init(&adev->uvd_ctx_idx_lock); spin_lock_init(&adev->didt_idx_lock); + spin_lock_init(&adev->gc_cac_idx_lock); spin_lock_init(&adev->audio_endpt_idx_lock); adev->rmmio_base = 
pci_resource_start(adev->pdev, 5); @@ -1511,17 +1563,20 @@ int amdgpu_device_init(struct amdgpu_device *adev, vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); /* Read BIOS */ - if (!amdgpu_get_bios(adev)) - return -EINVAL; + if (!amdgpu_get_bios(adev)) { + r = -EINVAL; + goto failed; + } /* Must be an ATOMBIOS */ if (!adev->is_atom_bios) { dev_err(adev->dev, "Expecting atombios for GPU\n"); - return -EINVAL; + r = -EINVAL; + goto failed; } r = amdgpu_atombios_init(adev); if (r) { dev_err(adev->dev, "amdgpu_atombios_init failed\n"); - return r; + goto failed; } /* See if the asic supports SR-IOV */ @@ -1538,7 +1593,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) { if (!adev->bios) { dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); - return -EINVAL; + r = -EINVAL; + goto failed; } DRM_INFO("GPU not posted. posting now...\n"); amdgpu_atom_asic_init(adev->mode_info.atom_context); @@ -1548,7 +1604,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_atombios_get_clock_info(adev); if (r) { dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); - return r; + goto failed; } /* init i2c buses */ amdgpu_atombios_i2c_init(adev); @@ -1557,7 +1613,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_fence_driver_init(adev); if (r) { dev_err(adev->dev, "amdgpu_fence_driver_init failed\n"); - return r; + goto failed; } /* init the mode config */ @@ -1567,7 +1623,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (r) { dev_err(adev->dev, "amdgpu_init failed\n"); amdgpu_fini(adev); - return r; + goto failed; } adev->accel_working = true; @@ -1577,7 +1633,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_ib_pool_init(adev); if (r) { dev_err(adev->dev, "IB initialization failed (%d).\n", r); - return r; + goto failed; } r = amdgpu_ib_ring_tests(adev); @@ -1594,6 +1650,12 @@ int amdgpu_device_init(struct amdgpu_device *adev, DRM_ERROR("registering register debugfs failed (%d).\n", r); } + r = amdgpu_debugfs_firmware_init(adev); + if (r) { + DRM_ERROR("registering firmware debugfs failed (%d).\n", r); + return r; + } + if ((amdgpu_testing & 1)) { if (adev->accel_working) amdgpu_test_moves(adev); @@ -1619,10 +1681,15 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_late_init(adev); if (r) { dev_err(adev->dev, "amdgpu_late_init failed\n"); - return r; + goto failed; } return 0; + +failed: + if (runtime) + vga_switcheroo_fini_domain_pm_ops(adev->dev); + return r; } static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev); @@ -1645,6 +1712,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) amdgpu_bo_evict_vram(adev); amdgpu_ib_pool_fini(adev); amdgpu_fence_driver_fini(adev); + drm_crtc_force_disable_all(adev->ddev); amdgpu_fbdev_fini(adev); r = amdgpu_fini(adev); kfree(adev->ip_block_status); @@ -1656,6 +1724,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev) kfree(adev->bios); adev->bios = NULL; vga_switcheroo_unregister_client(adev->pdev); + if (adev->flags & AMD_IS_PX) + vga_switcheroo_fini_domain_pm_ops(adev->dev); vga_client_register(adev->pdev, NULL, NULL, NULL); if (adev->rio_mem) pci_iounmap(adev->pdev, adev->rio_mem); @@ -1841,7 +1911,23 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) } drm_kms_helper_poll_enable(dev); + + /* + * Most of the connector probing functions try to acquire runtime pm + * refs to ensure that the GPU is powered on when connector polling is + * performed. 
Since we're calling this from a runtime PM callback, + * trying to acquire rpm refs will cause us to deadlock. + * + * Since we're guaranteed to be holding the rpm lock, it's safe to + * temporarily disable the rpm helpers so this doesn't deadlock us. + */ +#ifdef CONFIG_PM + dev->dev->power.disable_depth++; +#endif drm_helper_hpd_irq_event(dev); +#ifdef CONFIG_PM + dev->dev->power.disable_depth--; +#endif if (fbcon) { amdgpu_fbdev_set_suspend(adev, 0); @@ -1861,11 +1947,6 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) */ int amdgpu_gpu_reset(struct amdgpu_device *adev) { - unsigned ring_sizes[AMDGPU_MAX_RINGS]; - uint32_t *ring_data[AMDGPU_MAX_RINGS]; - - bool saved = false; - int i, r; int resched; @@ -1874,22 +1955,30 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); - r = amdgpu_suspend(adev); - + /* block scheduler */ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; + if (!ring) continue; - - ring_sizes[i] = amdgpu_ring_backup(ring, &ring_data[i]); - if (ring_sizes[i]) { - saved = true; - dev_info(adev->dev, "Saved %d dwords of commands " - "on ring %d.\n", ring_sizes[i], i); - } + kthread_park(ring->sched.thread); + amd_sched_hw_job_reset(&ring->sched); } + /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ + amdgpu_fence_driver_force_completion(adev); + + /* save scratch */ + amdgpu_atombios_scratch_regs_save(adev); + r = amdgpu_suspend(adev); retry: + /* Disable fb access */ + if (adev->mode_info.num_crtc) { + struct amdgpu_mode_mc_save save; + amdgpu_display_stop_mc_access(adev, &save); + amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC); + } + r = amdgpu_asic_reset(adev); /* post card */ amdgpu_atom_asic_init(adev->mode_info.atom_context); @@ -1898,32 +1987,29 @@ retry: dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); r = amdgpu_resume(adev); } - + /* restore scratch */ + amdgpu_atombios_scratch_regs_restore(adev); if (!r) { + r = amdgpu_ib_ring_tests(adev); + if (r) { + dev_err(adev->dev, "ib ring test failed (%d).\n", r); + r = amdgpu_suspend(adev); + goto retry; + } + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; if (!ring) continue; - - amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]); - ring_sizes[i] = 0; - ring_data[i] = NULL; - } - - r = amdgpu_ib_ring_tests(adev); - if (r) { - dev_err(adev->dev, "ib ring test failed (%d).\n", r); - if (saved) { - saved = false; - r = amdgpu_suspend(adev); - goto retry; - } + amd_sched_job_recovery(&ring->sched); + kthread_unpark(ring->sched.thread); } } else { - amdgpu_fence_driver_force_completion(adev); + dev_err(adev->dev, "asic resume failed (%d).\n", r); for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - if (adev->rings[i]) - kfree(ring_data[i]); + if (adev->rings[i]) { + kthread_unpark(adev->rings[i]->sched.thread); + } } } @@ -1934,13 +2020,11 @@ retry: /* bad news, how to tell it to userspace ? 
*/ dev_info(adev->dev, "GPU reset failed\n"); } + amdgpu_irq_gpu_reset_resume_helper(adev); return r; } -#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007 /* gen: chipset 1/2, asic 1/2/3 */ -#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */ - void amdgpu_get_pcie_info(struct amdgpu_device *adev) { u32 mask; @@ -2094,20 +2178,43 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, struct amdgpu_device *adev = f->f_inode->i_private; ssize_t result = 0; int r; + bool use_bank; + unsigned instance_bank, sh_bank, se_bank; if (size & 0x3 || *pos & 0x3) return -EINVAL; + if (*pos & (1ULL << 62)) { + se_bank = (*pos >> 24) & 0x3FF; + sh_bank = (*pos >> 34) & 0x3FF; + instance_bank = (*pos >> 44) & 0x3FF; + use_bank = 1; + *pos &= 0xFFFFFF; + } else { + use_bank = 0; + } + + if (use_bank) { + if (sh_bank >= adev->gfx.config.max_sh_per_se || + se_bank >= adev->gfx.config.max_shader_engines) + return -EINVAL; + mutex_lock(&adev->grbm_idx_mutex); + amdgpu_gfx_select_se_sh(adev, se_bank, + sh_bank, instance_bank); + } + while (size) { uint32_t value; if (*pos > adev->rmmio_size) - return result; + goto end; value = RREG32(*pos >> 2); r = put_user(value, (uint32_t *)buf); - if (r) - return r; + if (r) { + result = r; + goto end; + } result += 4; buf += 4; @@ -2115,6 +2222,12 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, size -= 4; } +end: + if (use_bank) { + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + } + return result; } @@ -2314,6 +2427,68 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * return result; } +static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = f->f_inode->i_private; + ssize_t result = 0; + int r; + uint32_t *config, no_regs = 0; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + config = kmalloc(256 * sizeof(*config), GFP_KERNEL); + if (!config) + return -ENOMEM; + + /* version, increment each time something is added */ + config[no_regs++] = 0; + config[no_regs++] = adev->gfx.config.max_shader_engines; + config[no_regs++] = adev->gfx.config.max_tile_pipes; + config[no_regs++] = adev->gfx.config.max_cu_per_sh; + config[no_regs++] = adev->gfx.config.max_sh_per_se; + config[no_regs++] = adev->gfx.config.max_backends_per_se; + config[no_regs++] = adev->gfx.config.max_texture_channel_caches; + config[no_regs++] = adev->gfx.config.max_gprs; + config[no_regs++] = adev->gfx.config.max_gs_threads; + config[no_regs++] = adev->gfx.config.max_hw_contexts; + config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend; + config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend; + config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size; + config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size; + config[no_regs++] = adev->gfx.config.num_tile_pipes; + config[no_regs++] = adev->gfx.config.backend_enable_mask; + config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes; + config[no_regs++] = adev->gfx.config.mem_row_size_in_kb; + config[no_regs++] = adev->gfx.config.shader_engine_tile_size; + config[no_regs++] = adev->gfx.config.num_gpus; + config[no_regs++] = adev->gfx.config.multi_gpu_tile_size; + config[no_regs++] = adev->gfx.config.mc_arb_ramcfg; + config[no_regs++] = adev->gfx.config.gb_addr_config; + config[no_regs++] = adev->gfx.config.num_rbs; + + while (size && (*pos < no_regs * 4)) { + uint32_t value; + + value = 
config[*pos >> 2]; + r = put_user(value, (uint32_t *)buf); + if (r) { + kfree(config); + return r; + } + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + kfree(config); + return result; +} + + static const struct file_operations amdgpu_debugfs_regs_fops = { .owner = THIS_MODULE, .read = amdgpu_debugfs_regs_read, @@ -2339,11 +2514,18 @@ static const struct file_operations amdgpu_debugfs_regs_smc_fops = { .llseek = default_llseek }; +static const struct file_operations amdgpu_debugfs_gca_config_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_gca_config_read, + .llseek = default_llseek +}; + static const struct file_operations *debugfs_regs[] = { &amdgpu_debugfs_regs_fops, &amdgpu_debugfs_regs_didt_fops, &amdgpu_debugfs_regs_pcie_fops, &amdgpu_debugfs_regs_smc_fops, + &amdgpu_debugfs_gca_config_fops, }; static const char *debugfs_regs_names[] = { @@ -2351,6 +2533,7 @@ static const char *debugfs_regs_names[] = { "amdgpu_regs_didt", "amdgpu_regs_pcie", "amdgpu_regs_smc", + "amdgpu_gca_config", }; static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index b0832da..76f9602 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -122,7 +122,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work) spin_unlock_irqrestore(&crtc->dev->event_lock, flags); usleep_range(min_udelay, 2 * min_udelay); spin_lock_irqsave(&crtc->dev->event_lock, flags); - }; + } if (!repcnt) DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, " @@ -220,19 +220,17 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base); if (unlikely(r != 0)) { - amdgpu_bo_unreserve(new_rbo); r = -EINVAL; DRM_ERROR("failed to pin new rbo buffer before flip\n"); - goto cleanup; + goto unreserve; } r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl, &work->shared_count, &work->shared); if (unlikely(r != 0)) { - amdgpu_bo_unreserve(new_rbo); DRM_ERROR("failed to get fences for buffer\n"); - goto cleanup; + goto unpin; } amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags); @@ -240,7 +238,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, work->base = base; - r = drm_vblank_get(crtc->dev, amdgpu_crtc->crtc_id); + r = drm_crtc_vblank_get(crtc); if (r) { DRM_ERROR("failed to get vblank before flip\n"); goto pflip_cleanup; @@ -268,16 +266,18 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, return 0; vblank_cleanup: - drm_vblank_put(crtc->dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(crtc); pflip_cleanup: if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) { DRM_ERROR("failed to reserve new rbo in error path\n"); goto cleanup; } +unpin: if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) { DRM_ERROR("failed to unpin new rbo in error path\n"); } +unreserve: amdgpu_bo_unreserve(new_rbo); cleanup: @@ -516,9 +516,7 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb); - if (amdgpu_fb->obj) { - drm_gem_object_unreference_unlocked(amdgpu_fb->obj); - } + drm_gem_object_unreference_unlocked(amdgpu_fb->obj); drm_framebuffer_cleanup(fb); kfree(amdgpu_fb); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f888c01..9aa533c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -52,9 
+52,10 @@ * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP) * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same * at the end of IBs. + * - 3.3.0 - Add VM support for UVD on supported hardware. */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 2 +#define KMS_DRIVER_MINOR 3 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; @@ -82,8 +83,12 @@ int amdgpu_exp_hw_support = 0; int amdgpu_sched_jobs = 32; int amdgpu_sched_hw_submission = 2; int amdgpu_powerplay = -1; +int amdgpu_powercontainment = 1; unsigned amdgpu_pcie_gen_cap = 0; unsigned amdgpu_pcie_lane_cap = 0; +unsigned amdgpu_cg_mask = 0xffffffff; +unsigned amdgpu_pg_mask = 0xffffffff; +char *amdgpu_disable_cu = NULL; MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); @@ -160,6 +165,9 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); #ifdef CONFIG_DRM_AMD_POWERPLAY MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))"); module_param_named(powerplay, amdgpu_powerplay, int, 0444); + +MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)"); +module_param_named(powercontainment, amdgpu_powercontainment, int, 0444); #endif MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))"); @@ -168,6 +176,15 @@ module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444); MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))"); module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444); +MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)"); +module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444); + +MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)"); +module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444); + +MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)"); +module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444); + static const struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_CIK /* Kaveri */ @@ -413,7 +430,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) pci_save_state(pdev); pci_disable_device(pdev); pci_ignore_hotplug(pdev); - pci_set_power_state(pdev, PCI_D3cold); + if (amdgpu_is_atpx_hybrid()) + pci_set_power_state(pdev, PCI_D3cold); + else if (!amdgpu_has_atpx_dgpu_power_cntl()) + pci_set_power_state(pdev, PCI_D3hot); drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; return 0; @@ -430,7 +450,9 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - pci_set_power_state(pdev, PCI_D0); + if (amdgpu_is_atpx_hybrid() || + !amdgpu_has_atpx_dgpu_power_cntl()) + pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); ret = pci_enable_device(pdev); if (ret) @@ -515,7 +537,7 @@ static struct drm_driver kms_driver = { .driver_features = DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | - DRIVER_PRIME | DRIVER_RENDER, + DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET, .dev_priv_size = 0, .load = amdgpu_driver_load_kms, .open = amdgpu_driver_open_kms, @@ -590,7 +612,6 @@ static int __init amdgpu_init(void) DRM_INFO("amdgpu kernel modesetting enabled.\n"); driver = &kms_driver; pdriver = &amdgpu_kms_pci_driver; - driver->driver_features |= DRIVER_MODESET; driver->num_ioctls = amdgpu_max_kms_ioctl; amdgpu_register_atpx_handler(); /* let modprobe override vga console 
setting */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index d155876..0b109ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -204,16 +204,25 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) if (seq != ring->fence_drv.sync_seq) amdgpu_fence_schedule_fallback(ring); - while (last_seq != seq) { + if (unlikely(seq == last_seq)) + return; + + last_seq &= drv->num_fences_mask; + seq &= drv->num_fences_mask; + + do { struct fence *fence, **ptr; - ptr = &drv->fences[++last_seq & drv->num_fences_mask]; + ++last_seq; + last_seq &= drv->num_fences_mask; + ptr = &drv->fences[last_seq]; /* There is always exactly one thread signaling this fence slot */ fence = rcu_dereference_protected(*ptr, 1); RCU_INIT_POINTER(*ptr, NULL); - BUG_ON(!fence); + if (!fence) + continue; r = fence_signal(fence); if (!r) @@ -222,7 +231,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) BUG(); fence_put(fence); - } + } while (last_seq != seq); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 8fab648..88fbed2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -503,7 +503,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, if (r) goto error_print; - amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates); + amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates); list_for_each_entry(entry, &list, head) { domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); /* if anything is swapped out don't swap it in here, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 9f95da4..a074edd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -70,3 +70,47 @@ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg) } } } + +/** + * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter + * + * @mask: array in which the per-shader array disable masks will be stored + * @max_se: number of SEs + * @max_sh: number of SHs + * + * The bitmask of CUs to be disabled in the shader array determined by se and + * sh is stored in mask[se * max_sh + sh]. 
+ */ +void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh) +{ + unsigned se, sh, cu; + const char *p; + + memset(mask, 0, sizeof(*mask) * max_se * max_sh); + + if (!amdgpu_disable_cu || !*amdgpu_disable_cu) + return; + + p = amdgpu_disable_cu; + for (;;) { + char *next; + int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu); + if (ret < 3) { + DRM_ERROR("amdgpu: could not parse disable_cu\n"); + return; + } + + if (se < max_se && sh < max_sh && cu < 16) { + DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu); + mask[se * max_sh + sh] |= 1u << cu; + } else { + DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n", + se, sh, cu); + } + + next = strchr(p, ','); + if (!next) + break; + p = next + 1; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index dc06cbd..51321e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -27,4 +27,6 @@ int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg); void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg); +unsigned amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 34e3542..a31d7ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -33,6 +33,8 @@ #include "amdgpu.h" #include "atom.h" +#define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000) + /* * IB * IBs (Indirect Buffers) and areas of GPU accessible memory where @@ -122,7 +124,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, bool skip_preamble, need_ctx_switch; unsigned patch_offset = ~0; struct amdgpu_vm *vm; - struct fence *hwf; uint64_t ctx; unsigned i; @@ -160,10 +161,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, patch_offset = amdgpu_ring_init_cond_exec(ring); if (vm) { - r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr, - job->gds_base, job->gds_size, - job->gws_base, job->gws_size, - job->oa_base, job->oa_size); + r = amdgpu_vm_flush(ring, job); if (r) { amdgpu_ring_undo(ring); return r; @@ -193,7 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (ring->funcs->emit_hdp_invalidate) amdgpu_ring_emit_hdp_invalidate(ring); - r = amdgpu_fence_emit(ring, &hwf); + r = amdgpu_fence_emit(ring, f); if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); if (job && job->vm_id) @@ -203,17 +201,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, } /* wrap the last IB with fence */ - if (job && job->uf_bo) { - uint64_t addr = amdgpu_bo_gpu_offset(job->uf_bo); - - addr += job->uf_offset; - amdgpu_ring_emit_fence(ring, addr, job->uf_sequence, + if (job && job->uf_addr) { + amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence, AMDGPU_FENCE_FLAG_64BIT); } - if (f) - *f = fence_get(hwf); - if (patch_offset != ~0 && ring->funcs->patch_cond_exec) amdgpu_ring_patch_cond_exec(ring, patch_offset); @@ -296,7 +288,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) if (!ring || !ring->ready) continue; - r = amdgpu_ring_test_ib(ring); + r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT); if (r) { ring->ready = false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 835a3fa..278708f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -383,6 +383,18 @@ 
int amdgpu_irq_update(struct amdgpu_device *adev, return r; } +void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) +{ + int i, j; + for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; i++) { + struct amdgpu_irq_src *src = adev->irq.sources[i]; + if (!src) + continue; + for (j = 0; j < src->num_types; j++) + amdgpu_irq_update(adev, src, j); + } +} + /** * amdgpu_irq_get - enable interrupt * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index e124b59..7ef0935 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -94,6 +94,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type); bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type); +void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev); int amdgpu_irq_add_domain(struct amdgpu_device *adev); void amdgpu_irq_remove_domain(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index f0dafa5..6674d40 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -28,21 +28,15 @@ #include "amdgpu.h" #include "amdgpu_trace.h" -static void amdgpu_job_free_handler(struct work_struct *ws) +static void amdgpu_job_timedout(struct amd_sched_job *s_job) { - struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job); - amd_sched_job_put(&job->base); -} + struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); -void amdgpu_job_timeout_func(struct work_struct *work) -{ - struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work); DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n", - job->base.sched->name, - (uint32_t)atomic_read(&job->ring->fence_drv.last_seq), - job->ring->fence_drv.sync_seq); - - amd_sched_job_put(&job->base); + job->base.sched->name, + atomic_read(&job->ring->fence_drv.last_seq), + job->ring->fence_drv.sync_seq); + amdgpu_gpu_reset(job->adev); } int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, @@ -63,7 +57,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, (*job)->vm = vm; (*job)->ibs = (void *)&(*job)[1]; (*job)->num_ibs = num_ibs; - INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler); amdgpu_sync_create(&(*job)->sync); @@ -86,27 +79,33 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, return r; } -void amdgpu_job_free(struct amdgpu_job *job) +void amdgpu_job_free_resources(struct amdgpu_job *job) { - unsigned i; struct fence *f; + unsigned i; + /* use sched fence if available */ - f = (job->base.s_fence)? &job->base.s_fence->base : job->fence; + f = job->base.s_fence ? 
&job->base.s_fence->finished : job->fence; for (i = 0; i < job->num_ibs; ++i) - amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f); - fence_put(job->fence); + amdgpu_ib_free(job->adev, &job->ibs[i], f); +} - amdgpu_bo_unref(&job->uf_bo); - amdgpu_sync_free(&job->sync); +void amdgpu_job_free_cb(struct amd_sched_job *s_job) +{ + struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); - if (!job->base.use_sched) - kfree(job); + fence_put(job->fence); + amdgpu_sync_free(&job->sync); + kfree(job); } -void amdgpu_job_free_func(struct kref *refcount) +void amdgpu_job_free(struct amdgpu_job *job) { - struct amdgpu_job *job = container_of(refcount, struct amdgpu_job, base.refcount); + amdgpu_job_free_resources(job); + + fence_put(job->fence); + amdgpu_sync_free(&job->sync); kfree(job); } @@ -114,22 +113,20 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, struct amd_sched_entity *entity, void *owner, struct fence **f) { - struct fence *fence; int r; job->ring = ring; if (!f) return -EINVAL; - r = amd_sched_job_init(&job->base, &ring->sched, - entity, amdgpu_job_timeout_func, - amdgpu_job_free_func, owner, &fence); + r = amd_sched_job_init(&job->base, &ring->sched, entity, owner); if (r) return r; job->owner = owner; job->ctx = entity->fence_context; - *f = fence_get(fence); + *f = fence_get(&job->base.s_fence->finished); + amdgpu_job_free_resources(job); amd_sched_entity_push_job(&job->base); return 0; @@ -147,8 +144,8 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) int r; r = amdgpu_vm_grab_id(vm, ring, &job->sync, - &job->base.s_fence->base, - &job->vm_id, &job->vm_pd_addr); + &job->base.s_fence->finished, + job); if (r) DRM_ERROR("Error getting VM ID (%d)\n", r); @@ -170,29 +167,24 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job) } job = to_amdgpu_job(sched_job); - r = amdgpu_sync_wait(&job->sync); - if (r) { - DRM_ERROR("failed to sync wait (%d)\n", r); - return NULL; - } + BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL)); trace_amdgpu_sched_run_job(job); r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job->sync.last_vm_update, job, &fence); - if (r) { + if (r) DRM_ERROR("Error scheduling IBs (%d)\n", r); - goto err; - } -err: - job->fence = fence; - amdgpu_job_free(job); + /* if gpu reset, hw fence will be replaced here */ + fence_put(job->fence); + job->fence = fence_get(fence); + amdgpu_job_free_resources(job); return fence; } const struct amd_sched_backend_ops amdgpu_sched_ops = { .dependency = amdgpu_job_dependency, .run_job = amdgpu_job_run, - .begin_job = amd_sched_job_begin, - .finish_job = amd_sched_job_finish, + .timedout_job = amdgpu_job_timedout, + .free_job = amdgpu_job_free_cb }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index d851ea1..d942654 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -60,7 +60,10 @@ int amdgpu_driver_unload_kms(struct drm_device *dev) if (adev->rmmio == NULL) goto done_free; - pm_runtime_get_sync(dev->dev); + if (amdgpu_device_is_px(dev)) { + pm_runtime_get_sync(dev->dev); + pm_runtime_forbid(dev->dev); + } amdgpu_amdkfd_device_fini(adev); @@ -135,13 +138,75 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) } out: - if (r) + if (r) { + /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */ + if (adev->rmmio && amdgpu_device_is_px(dev)) + pm_runtime_put_noidle(dev->dev); amdgpu_driver_unload_kms(dev); - + } return r; } 
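/*
 * [Editor's illustration, not part of the patch] The amdgpu_firmware_info()
 * helper introduced just below centralizes the firmware-version lookup that
 * both the AMDGPU_INFO_FW_VERSION ioctl path and the new amdgpu_firmware_info
 * debugfs file are served from.  The minimal userspace sketch that follows
 * shows how that ioctl side is typically exercised; it assumes the libdrm
 * drmCommandWrite() helper and the struct drm_amdgpu_info /
 * drm_amdgpu_query_fw / drm_amdgpu_info_firmware layout from the amdgpu UAPI
 * headers, so treat it as a hedged example rather than a verified test case.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

static int query_uvd_firmware(int drm_fd)
{
	struct drm_amdgpu_info request;
	struct drm_amdgpu_info_firmware fw_info;

	memset(&request, 0, sizeof(request));
	memset(&fw_info, 0, sizeof(fw_info));

	/* Ask the kernel to copy the firmware info into fw_info. */
	request.return_pointer = (uint64_t)(uintptr_t)&fw_info;
	request.return_size = sizeof(fw_info);
	request.query = AMDGPU_INFO_FW_VERSION;
	request.query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	request.query_fw.ip_instance = 0;	/* only instance 0 is supported */
	request.query_fw.index = 0;

	if (drmCommandWrite(drm_fd, DRM_AMDGPU_INFO, &request, sizeof(request)))
		return -1;

	printf("UVD firmware 0x%08x, feature %u\n", fw_info.ver, fw_info.feature);
	return 0;
}
/* [End of editor's illustration] */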
+static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, + struct drm_amdgpu_query_fw *query_fw, + struct amdgpu_device *adev) +{ + switch (query_fw->fw_type) { + case AMDGPU_INFO_FW_VCE: + fw_info->ver = adev->vce.fw_version; + fw_info->feature = adev->vce.fb_version; + break; + case AMDGPU_INFO_FW_UVD: + fw_info->ver = adev->uvd.fw_version; + fw_info->feature = 0; + break; + case AMDGPU_INFO_FW_GMC: + fw_info->ver = adev->mc.fw_version; + fw_info->feature = 0; + break; + case AMDGPU_INFO_FW_GFX_ME: + fw_info->ver = adev->gfx.me_fw_version; + fw_info->feature = adev->gfx.me_feature_version; + break; + case AMDGPU_INFO_FW_GFX_PFP: + fw_info->ver = adev->gfx.pfp_fw_version; + fw_info->feature = adev->gfx.pfp_feature_version; + break; + case AMDGPU_INFO_FW_GFX_CE: + fw_info->ver = adev->gfx.ce_fw_version; + fw_info->feature = adev->gfx.ce_feature_version; + break; + case AMDGPU_INFO_FW_GFX_RLC: + fw_info->ver = adev->gfx.rlc_fw_version; + fw_info->feature = adev->gfx.rlc_feature_version; + break; + case AMDGPU_INFO_FW_GFX_MEC: + if (query_fw->index == 0) { + fw_info->ver = adev->gfx.mec_fw_version; + fw_info->feature = adev->gfx.mec_feature_version; + } else if (query_fw->index == 1) { + fw_info->ver = adev->gfx.mec2_fw_version; + fw_info->feature = adev->gfx.mec2_feature_version; + } else + return -EINVAL; + break; + case AMDGPU_INFO_FW_SMC: + fw_info->ver = adev->pm.fw_version; + fw_info->feature = 0; + break; + case AMDGPU_INFO_FW_SDMA: + if (query_fw->index >= adev->sdma.num_instances) + return -EINVAL; + fw_info->ver = adev->sdma.instance[query_fw->index].fw_version; + fw_info->feature = adev->sdma.instance[query_fw->index].feature_version; + break; + default: + return -EINVAL; + } + return 0; +} + /* * Userspace get information ioctl */ @@ -288,67 +353,20 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0; } case AMDGPU_INFO_TIMESTAMP: - ui64 = amdgpu_asic_get_gpu_clock_counter(adev); + ui64 = amdgpu_gfx_get_gpu_clock_counter(adev); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_FW_VERSION: { struct drm_amdgpu_info_firmware fw_info; + int ret; /* We only support one instance of each IP block right now. 
*/ if (info->query_fw.ip_instance != 0) return -EINVAL; - switch (info->query_fw.fw_type) { - case AMDGPU_INFO_FW_VCE: - fw_info.ver = adev->vce.fw_version; - fw_info.feature = adev->vce.fb_version; - break; - case AMDGPU_INFO_FW_UVD: - fw_info.ver = adev->uvd.fw_version; - fw_info.feature = 0; - break; - case AMDGPU_INFO_FW_GMC: - fw_info.ver = adev->mc.fw_version; - fw_info.feature = 0; - break; - case AMDGPU_INFO_FW_GFX_ME: - fw_info.ver = adev->gfx.me_fw_version; - fw_info.feature = adev->gfx.me_feature_version; - break; - case AMDGPU_INFO_FW_GFX_PFP: - fw_info.ver = adev->gfx.pfp_fw_version; - fw_info.feature = adev->gfx.pfp_feature_version; - break; - case AMDGPU_INFO_FW_GFX_CE: - fw_info.ver = adev->gfx.ce_fw_version; - fw_info.feature = adev->gfx.ce_feature_version; - break; - case AMDGPU_INFO_FW_GFX_RLC: - fw_info.ver = adev->gfx.rlc_fw_version; - fw_info.feature = adev->gfx.rlc_feature_version; - break; - case AMDGPU_INFO_FW_GFX_MEC: - if (info->query_fw.index == 0) { - fw_info.ver = adev->gfx.mec_fw_version; - fw_info.feature = adev->gfx.mec_feature_version; - } else if (info->query_fw.index == 1) { - fw_info.ver = adev->gfx.mec2_fw_version; - fw_info.feature = adev->gfx.mec2_feature_version; - } else - return -EINVAL; - break; - case AMDGPU_INFO_FW_SMC: - fw_info.ver = adev->pm.fw_version; - fw_info.feature = 0; - break; - case AMDGPU_INFO_FW_SDMA: - if (info->query_fw.index >= adev->sdma.num_instances) - return -EINVAL; - fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version; - fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version; - break; - default: - return -EINVAL; - } + ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev); + if (ret) + return ret; + return copy_to_user(out, &fw_info, min((size_t)size, sizeof(fw_info))) ? 
-EFAULT : 0; } @@ -566,6 +584,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr); + amdgpu_uvd_free_handles(adev, file_priv); + amdgpu_vce_free_handles(adev, file_priv); + amdgpu_vm_fini(adev, &fpriv->vm); idr_for_each_entry(&fpriv->bo_list_handles, list, handle) @@ -590,10 +611,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, void amdgpu_driver_preclose_kms(struct drm_device *dev, struct drm_file *file_priv) { - struct amdgpu_device *adev = dev->dev_private; - - amdgpu_uvd_free_handles(adev, file_priv); - amdgpu_vce_free_handles(adev, file_priv); } /* @@ -756,3 +773,130 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), }; const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms); + +/* + * Debugfs info + */ +#if defined(CONFIG_DEBUG_FS) + +static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct amdgpu_device *adev = dev->dev_private; + struct drm_amdgpu_info_firmware fw_info; + struct drm_amdgpu_query_fw query_fw; + int ret, i; + + /* VCE */ + query_fw.fw_type = AMDGPU_INFO_FW_VCE; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* UVD */ + query_fw.fw_type = AMDGPU_INFO_FW_UVD; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* GMC */ + query_fw.fw_type = AMDGPU_INFO_FW_GMC; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* ME */ + query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* PFP */ + query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* CE */ + query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* RLC */ + query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* MEC */ + query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC; + query_fw.index = 0; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* MEC2 */ + if (adev->asic_type == CHIP_KAVERI || + (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) { + query_fw.index = 1; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + } + + /* 
SMC */ + query_fw.fw_type = AMDGPU_INFO_FW_SMC; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* SDMA */ + query_fw.fw_type = AMDGPU_INFO_FW_SDMA; + for (i = 0; i < adev->sdma.num_instances; i++) { + query_fw.index = i; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n", + i, fw_info.feature, fw_info.ver); + } + + return 0; +} + +static const struct drm_info_list amdgpu_firmware_info_list[] = { + {"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL}, +}; +#endif + +int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list, + ARRAY_SIZE(amdgpu_firmware_info_list)); +#else + return 0; +#endif +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 7ecea83..6f0873c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -589,6 +589,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct amdgpu_bo *rbo; + struct ttm_mem_reg *old_mem = &bo->mem; if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) return; @@ -602,6 +603,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, /* move_notify is called before move happens */ amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem); + + trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type); } int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 0e13d80..ff63b88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -347,6 +347,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, if (adev->pp_enabled) size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); + else if (adev->pm.funcs->print_clock_levels) + size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf); return size; } @@ -363,7 +365,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, uint32_t i, mask = 0; char sub_str[2]; - for (i = 0; i < strlen(buf) - 1; i++) { + for (i = 0; i < strlen(buf); i++) { + if (*(buf + i) == '\n') + continue; sub_str[0] = *(buf + i); sub_str[1] = '\0'; ret = kstrtol(sub_str, 0, &level); @@ -377,6 +381,8 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, if (adev->pp_enabled) amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); + else if (adev->pm.funcs->force_clock_level) + adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask); fail: return count; } @@ -391,6 +397,8 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, if (adev->pp_enabled) size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); + else if (adev->pm.funcs->print_clock_levels) + size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf); return size; } @@ -407,7 +415,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, uint32_t i, mask = 0; char sub_str[2]; - for (i = 0; i < strlen(buf) - 1; i++) { + for (i = 0; i < strlen(buf); i++) { + if (*(buf + i) == '\n') + continue; sub_str[0] = *(buf + i); sub_str[1] = '\0'; ret = kstrtol(sub_str, 0, &level); @@ -421,6 +431,8 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, if (adev->pp_enabled) amdgpu_dpm_force_clock_level(adev, 
PP_MCLK, mask); + else if (adev->pm.funcs->force_clock_level) + adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask); fail: return count; } @@ -435,6 +447,8 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, if (adev->pp_enabled) size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); + else if (adev->pm.funcs->print_clock_levels) + size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf); return size; } @@ -451,7 +465,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, uint32_t i, mask = 0; char sub_str[2]; - for (i = 0; i < strlen(buf) - 1; i++) { + for (i = 0; i < strlen(buf); i++) { + if (*(buf + i) == '\n') + continue; sub_str[0] = *(buf + i); sub_str[1] = '\0'; ret = kstrtol(sub_str, 0, &level); @@ -465,6 +481,100 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, if (adev->pp_enabled) amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); + else if (adev->pm.funcs->force_clock_level) + adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask); +fail: + return count; +} + +static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + uint32_t value = 0; + + if (adev->pp_enabled) + value = amdgpu_dpm_get_sclk_od(adev); + else if (adev->pm.funcs->get_sclk_od) + value = adev->pm.funcs->get_sclk_od(adev); + + return snprintf(buf, PAGE_SIZE, "%d\n", value); +} + +static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + int ret; + long int value; + + ret = kstrtol(buf, 0, &value); + + if (ret) { + count = -EINVAL; + goto fail; + } + + if (adev->pp_enabled) { + amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); + amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL); + } else if (adev->pm.funcs->set_sclk_od) { + adev->pm.funcs->set_sclk_od(adev, (uint32_t)value); + adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; + amdgpu_pm_compute_clocks(adev); + } + +fail: + return count; +} + +static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + uint32_t value = 0; + + if (adev->pp_enabled) + value = amdgpu_dpm_get_mclk_od(adev); + else if (adev->pm.funcs->get_mclk_od) + value = adev->pm.funcs->get_mclk_od(adev); + + return snprintf(buf, PAGE_SIZE, "%d\n", value); +} + +static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + int ret; + long int value; + + ret = kstrtol(buf, 0, &value); + + if (ret) { + count = -EINVAL; + goto fail; + } + + if (adev->pp_enabled) { + amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); + amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL); + } else if (adev->pm.funcs->set_mclk_od) { + adev->pm.funcs->set_mclk_od(adev, (uint32_t)value); + adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; + amdgpu_pm_compute_clocks(adev); + } + fail: return count; } @@ -490,6 +600,12 @@ static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR, static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR, amdgpu_get_pp_dpm_pcie, amdgpu_set_pp_dpm_pcie); +static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR, + 
amdgpu_get_pp_sclk_od, + amdgpu_set_pp_sclk_od); +static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR, + amdgpu_get_pp_mclk_od, + amdgpu_set_pp_mclk_od); static ssize_t amdgpu_hwmon_show_temp(struct device *dev, struct device_attribute *attr, @@ -1108,22 +1224,34 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) DRM_ERROR("failed to create device file pp_table\n"); return ret; } - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_sclk\n"); - return ret; - } - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_mclk\n"); - return ret; - } - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_pcie\n"); - return ret; - } } + + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_sclk\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_mclk\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_pcie\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od); + if (ret) { + DRM_ERROR("failed to create device file pp_sclk_od\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od); + if (ret) { + DRM_ERROR("failed to create device file pp_mclk_od\n"); + return ret; + } + ret = amdgpu_debugfs_pm_init(adev); if (ret) { DRM_ERROR("Failed to register debugfs file for dpm!\n"); @@ -1146,10 +1274,12 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) device_remove_file(adev->dev, &dev_attr_pp_cur_state); device_remove_file(adev->dev, &dev_attr_pp_force_state); device_remove_file(adev->dev, &dev_attr_pp_table); - device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); - device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); - device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); } + device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); + device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); + device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); + device_remove_file(adev->dev, &dev_attr_pp_sclk_od); + device_remove_file(adev->dev, &dev_attr_pp_mclk_od); } void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 8225655..c5738a22 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -52,6 +52,7 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev) pp_init->chip_family = adev->family; pp_init->chip_id = adev->asic_type; pp_init->device = amdgpu_cgs_create_device(adev); + pp_init->powercontainment_enabled = amdgpu_powercontainment; ret = amd_powerplay_init(pp_init, amd_pp); kfree(pp_init); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 870f949..85aeb0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -28,6 +28,7 @@ */ #include <linux/seq_file.h> #include <linux/slab.h> +#include <linux/debugfs.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" @@ -48,6 +49,7 @@ */ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring); +static void 
amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring); /** * amdgpu_ring_alloc - allocate space on the ring buffer @@ -73,6 +75,10 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw) ring->count_dw = ndw; ring->wptr_old = ring->wptr; + + if (ring->funcs->begin_use) + ring->funcs->begin_use(ring); + return 0; } @@ -125,6 +131,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) mb(); amdgpu_ring_set_wptr(ring); + + if (ring->funcs->end_use) + ring->funcs->end_use(ring); } /** @@ -137,78 +146,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) void amdgpu_ring_undo(struct amdgpu_ring *ring) { ring->wptr = ring->wptr_old; -} - -/** - * amdgpu_ring_backup - Back up the content of a ring - * - * @ring: the ring we want to back up - * - * Saves all unprocessed commits from a ring, returns the number of dwords saved. - */ -unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, - uint32_t **data) -{ - unsigned size, ptr, i; - - *data = NULL; - - if (ring->ring_obj == NULL) - return 0; - - /* it doesn't make sense to save anything if all fences are signaled */ - if (!amdgpu_fence_count_emitted(ring)) - return 0; - - ptr = le32_to_cpu(*ring->next_rptr_cpu_addr); - - size = ring->wptr + (ring->ring_size / 4); - size -= ptr; - size &= ring->ptr_mask; - if (size == 0) - return 0; - - /* and then save the content of the ring */ - *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); - if (!*data) - return 0; - for (i = 0; i < size; ++i) { - (*data)[i] = ring->ring[ptr++]; - ptr &= ring->ptr_mask; - } - - return size; -} - -/** - * amdgpu_ring_restore - append saved commands to the ring again - * - * @ring: ring to append commands to - * @size: number of dwords we want to write - * @data: saved commands - * - * Allocates space on the ring and restore the previously saved commands. 
- */ -int amdgpu_ring_restore(struct amdgpu_ring *ring, - unsigned size, uint32_t *data) -{ - int i, r; - - if (!size || !data) - return 0; - - /* restore the saved ring content */ - r = amdgpu_ring_alloc(ring, size); - if (r) - return r; - - for (i = 0; i < size; ++i) { - amdgpu_ring_write(ring, data[i]); - } - amdgpu_ring_commit(ring); - kfree(data); - return 0; + if (ring->funcs->end_use) + ring->funcs->end_use(ring); } /** @@ -260,14 +200,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, return r; } - r = amdgpu_wb_get(adev, &ring->next_rptr_offs); - if (r) { - dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r); - return r; - } - ring->next_rptr_gpu_addr = adev->wb.gpu_addr + ring->next_rptr_offs * 4; - ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs]; - r = amdgpu_wb_get(adev, &ring->cond_exe_offs); if (r) { dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r); @@ -276,7 +208,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4); ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs]; - spin_lock_init(&ring->fence_lock); r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type); if (r) { dev_err(adev->dev, "failed initializing fences (%d).\n", r); @@ -310,6 +241,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, } r = amdgpu_bo_kmap(ring->ring_obj, (void **)&ring->ring); + + memset((void *)ring->ring, 0, ring->ring_size); + amdgpu_bo_unreserve(ring->ring_obj); if (r) { dev_err(adev->dev, "(%d) ring map failed\n", r); @@ -347,7 +281,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) amdgpu_wb_free(ring->adev, ring->fence_offs); amdgpu_wb_free(ring->adev, ring->rptr_offs); amdgpu_wb_free(ring->adev, ring->wptr_offs); - amdgpu_wb_free(ring->adev, ring->next_rptr_offs); if (ring_obj) { r = amdgpu_bo_reserve(ring_obj, false); @@ -358,6 +291,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) } amdgpu_bo_unref(&ring_obj); } + amdgpu_debugfs_ring_fini(ring); } /* @@ -365,57 +299,62 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) */ #if defined(CONFIG_DEBUG_FS) -static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data) +/* Layout of file is 12 bytes consisting of + * - rptr + * - wptr + * - driver's copy of wptr + * + * followed by n-words of ring data + */ +static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) { - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; - int roffset = (unsigned long)node->info_ent->data; - struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset); - uint32_t rptr, wptr, rptr_next; - unsigned i; - - wptr = amdgpu_ring_get_wptr(ring); - seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr); - - rptr = amdgpu_ring_get_rptr(ring); - rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr); - - seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr); - - seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", - ring->wptr, ring->wptr); - - if (!ring->ready) - return 0; - - /* print 8 dw before current rptr as often it's the last executed - * packet that is the root issue - */ - i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; - while (i != rptr) { - seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]); - if (i == rptr) - seq_puts(m, " *"); - if (i == rptr_next) - seq_puts(m, " #"); - seq_puts(m, 
"\n"); - i = (i + 1) & ring->ptr_mask; + struct amdgpu_ring *ring = (struct amdgpu_ring*)f->f_inode->i_private; + int r, i; + uint32_t value, result, early[3]; + + if (*pos & 3 || size & 3) + return -EINVAL; + + result = 0; + + if (*pos < 12) { + early[0] = amdgpu_ring_get_rptr(ring); + early[1] = amdgpu_ring_get_wptr(ring); + early[2] = ring->wptr; + for (i = *pos / 4; i < 3 && size; i++) { + r = put_user(early[i], (uint32_t *)buf); + if (r) + return r; + buf += 4; + result += 4; + size -= 4; + *pos += 4; + } } - while (i != wptr) { - seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]); - if (i == rptr) - seq_puts(m, " *"); - if (i == rptr_next) - seq_puts(m, " #"); - seq_puts(m, "\n"); - i = (i + 1) & ring->ptr_mask; + + while (size) { + if (*pos >= (ring->ring_size + 12)) + return result; + + value = ring->ring[(*pos - 12)/4]; + r = put_user(value, (uint32_t*)buf); + if (r) + return r; + buf += 4; + result += 4; + size -= 4; + *pos += 4; } - return 0; + + return result; } -static struct drm_info_list amdgpu_debugfs_ring_info_list[AMDGPU_MAX_RINGS]; -static char amdgpu_debugfs_ring_names[AMDGPU_MAX_RINGS][32]; +static const struct file_operations amdgpu_debugfs_ring_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_ring_read, + .llseek = default_llseek +}; #endif @@ -423,28 +362,27 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring) { #if defined(CONFIG_DEBUG_FS) - unsigned offset = (uint8_t*)ring - (uint8_t*)adev; - unsigned i; - struct drm_info_list *info; - char *name; - - for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) { - info = &amdgpu_debugfs_ring_info_list[i]; - if (!info->data) - break; - } + struct drm_minor *minor = adev->ddev->primary; + struct dentry *ent, *root = minor->debugfs_root; + char name[32]; - if (i == ARRAY_SIZE(amdgpu_debugfs_ring_info_list)) - return -ENOSPC; - - name = &amdgpu_debugfs_ring_names[i][0]; sprintf(name, "amdgpu_ring_%s", ring->name); - info->name = name; - info->show = amdgpu_debugfs_ring_info; - info->driver_features = 0; - info->data = (void*)(uintptr_t)offset; - return amdgpu_debugfs_add_files(adev, info, 1); + ent = debugfs_create_file(name, + S_IFREG | S_IRUGO, root, + ring, &amdgpu_debugfs_ring_fops); + if (IS_ERR(ent)) + return PTR_ERR(ent); + + i_size_write(ent->d_inode, ring->ring_size + 12); + ring->ent = ent; #endif return 0; } + +static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring) +{ +#if defined(CONFIG_DEBUG_FS) + debugfs_remove(ring->ent); +#endif +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 48618ee..d8af37a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -428,7 +428,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, soffset, eoffset, eoffset - soffset); if (i->fence) - seq_printf(m, " protected by 0x%08x on context %d", + seq_printf(m, " protected by 0x%08x on context %llu", i->fence->seqno, i->fence->context); seq_printf(m, "\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 34a9280..5c8d302 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -223,13 +223,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, } /** - * amdgpu_sync_is_idle - test if all fences are signaled + * amdgpu_sync_peek_fence - get the next fence not signaled yet * * @sync: the sync object + * @ring: optional ring to use for test * - * Returns true 
if all fences in the sync object are signaled. + * Returns the next fence not signaled yet without removing it from the sync + * object. */ -bool amdgpu_sync_is_idle(struct amdgpu_sync *sync) +struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, + struct amdgpu_ring *ring) { struct amdgpu_sync_entry *e; struct hlist_node *tmp; @@ -237,6 +240,19 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync) hash_for_each_safe(sync->fences, i, tmp, e, node) { struct fence *f = e->fence; + struct amd_sched_fence *s_fence = to_amd_sched_fence(f); + + if (ring && s_fence) { + /* For fences from the same ring it is sufficient + * when they are scheduled. + */ + if (s_fence->sched == &ring->sched) { + if (fence_is_signaled(&s_fence->scheduled)) + continue; + + return &s_fence->scheduled; + } + } if (fence_is_signaled(f)) { hash_del(&e->node); @@ -245,58 +261,19 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync) continue; } - return false; + return f; } - return true; + return NULL; } /** - * amdgpu_sync_cycle_fences - move fences from one sync object into another + * amdgpu_sync_get_fence - get the next fence from the sync object * - * @dst: the destination sync object - * @src: the source sync object - * @fence: fence to add to source + * @sync: sync object to use * - * Remove all fences from source and put them into destination and add - * fence as new one into source. + * Get and removes the next fence from the sync object not signaled yet. */ -int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src, - struct fence *fence) -{ - struct amdgpu_sync_entry *e, *newone; - struct hlist_node *tmp; - int i; - - /* Allocate the new entry before moving the old ones */ - newone = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL); - if (!newone) - return -ENOMEM; - - hash_for_each_safe(src->fences, i, tmp, e, node) { - struct fence *f = e->fence; - - hash_del(&e->node); - if (fence_is_signaled(f)) { - fence_put(f); - kmem_cache_free(amdgpu_sync_slab, e); - continue; - } - - if (amdgpu_sync_add_later(dst, f)) { - kmem_cache_free(amdgpu_sync_slab, e); - continue; - } - - hash_add(dst->fences, &e->node, f->context); - } - - hash_add(src->fences, &newone->node, fence->context); - newone->fence = fence_get(fence); - - return 0; -} - struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) { struct amdgpu_sync_entry *e; @@ -319,25 +296,6 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) return NULL; } -int amdgpu_sync_wait(struct amdgpu_sync *sync) -{ - struct amdgpu_sync_entry *e; - struct hlist_node *tmp; - int i, r; - - hash_for_each_safe(sync->fences, i, tmp, e, node) { - r = fence_wait(e->fence, false); - if (r) - return r; - - hash_del(&e->node); - fence_put(e->fence); - kmem_cache_free(amdgpu_sync_slab, e); - } - - return 0; -} - /** * amdgpu_sync_free - free the sync object * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 26a5f4a..0d8d65e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -11,19 +11,68 @@ #define TRACE_SYSTEM amdgpu #define TRACE_INCLUDE_FILE amdgpu_trace +TRACE_EVENT(amdgpu_mm_rreg, + TP_PROTO(unsigned did, uint32_t reg, uint32_t value), + TP_ARGS(did, reg, value), + TP_STRUCT__entry( + __field(unsigned, did) + __field(uint32_t, reg) + __field(uint32_t, value) + ), + TP_fast_assign( + __entry->did = did; + __entry->reg = reg; + __entry->value = value; + ), + TP_printk("0x%04lx, 0x%04lx, 0x%08lx", + (unsigned long)__entry->did, + 
(unsigned long)__entry->reg, + (unsigned long)__entry->value) +); + +TRACE_EVENT(amdgpu_mm_wreg, + TP_PROTO(unsigned did, uint32_t reg, uint32_t value), + TP_ARGS(did, reg, value), + TP_STRUCT__entry( + __field(unsigned, did) + __field(uint32_t, reg) + __field(uint32_t, value) + ), + TP_fast_assign( + __entry->did = did; + __entry->reg = reg; + __entry->value = value; + ), + TP_printk("0x%04lx, 0x%04lx, 0x%08lx", + (unsigned long)__entry->did, + (unsigned long)__entry->reg, + (unsigned long)__entry->value) +); + TRACE_EVENT(amdgpu_bo_create, TP_PROTO(struct amdgpu_bo *bo), TP_ARGS(bo), TP_STRUCT__entry( __field(struct amdgpu_bo *, bo) __field(u32, pages) + __field(u32, type) + __field(u32, prefer) + __field(u32, allow) + __field(u32, visible) ), TP_fast_assign( __entry->bo = bo; __entry->pages = bo->tbo.num_pages; + __entry->type = bo->tbo.mem.mem_type; + __entry->prefer = bo->prefered_domains; + __entry->allow = bo->allowed_domains; + __entry->visible = bo->flags; ), - TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) + + TP_printk("bo=%p,pages=%u,type=%d,prefered=%d,allowed=%d,visible=%d", + __entry->bo, __entry->pages, __entry->type, + __entry->prefer, __entry->allow, __entry->visible) ); TRACE_EVENT(amdgpu_cs, @@ -64,7 +113,7 @@ TRACE_EVENT(amdgpu_cs_ioctl, __entry->adev = job->adev; __entry->sched_job = &job->base; __entry->ib = job->ibs; - __entry->fence = &job->base.s_fence->base; + __entry->fence = &job->base.s_fence->finished; __entry->ring_name = job->ring->name; __entry->num_ibs = job->num_ibs; ), @@ -89,7 +138,7 @@ TRACE_EVENT(amdgpu_sched_run_job, __entry->adev = job->adev; __entry->sched_job = &job->base; __entry->ib = job->ibs; - __entry->fence = &job->base.s_fence->base; + __entry->fence = &job->base.s_fence->finished; __entry->ring_name = job->ring->name; __entry->num_ibs = job->num_ibs; ), @@ -100,24 +149,26 @@ TRACE_EVENT(amdgpu_sched_run_job, TRACE_EVENT(amdgpu_vm_grab_id, - TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid, - uint64_t pd_addr), - TP_ARGS(vm, ring, vmid, pd_addr), + TP_PROTO(struct amdgpu_vm *vm, int ring, struct amdgpu_job *job), + TP_ARGS(vm, ring, job), TP_STRUCT__entry( __field(struct amdgpu_vm *, vm) __field(u32, ring) __field(u32, vmid) __field(u64, pd_addr) + __field(u32, needs_flush) ), TP_fast_assign( __entry->vm = vm; __entry->ring = ring; - __entry->vmid = vmid; - __entry->pd_addr = pd_addr; + __entry->vmid = job->vm_id; + __entry->pd_addr = job->vm_pd_addr; + __entry->needs_flush = job->vm_needs_flush; ), - TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm, - __entry->ring, __entry->vmid, __entry->pd_addr) + TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx needs_flush=%u", + __entry->vm, __entry->ring, __entry->vmid, + __entry->pd_addr, __entry->needs_flush) ); TRACE_EVENT(amdgpu_vm_bo_map, @@ -244,13 +295,55 @@ TRACE_EVENT(amdgpu_bo_list_set, TP_STRUCT__entry( __field(struct amdgpu_bo_list *, list) __field(struct amdgpu_bo *, bo) + __field(u64, bo_size) ), TP_fast_assign( __entry->list = list; __entry->bo = bo; + __entry->bo_size = amdgpu_bo_size(bo); ), - TP_printk("list=%p, bo=%p", __entry->list, __entry->bo) + TP_printk("list=%p, bo=%p, bo_size = %Ld", + __entry->list, + __entry->bo, + __entry->bo_size) +); + +TRACE_EVENT(amdgpu_cs_bo_status, + TP_PROTO(uint64_t total_bo, uint64_t total_size), + TP_ARGS(total_bo, total_size), + TP_STRUCT__entry( + __field(u64, total_bo) + __field(u64, total_size) + ), + + TP_fast_assign( + __entry->total_bo = total_bo; + __entry->total_size = total_size; + ), + TP_printk("total 
bo size = %Ld, total bo count = %Ld", + __entry->total_bo, __entry->total_size) +); + +TRACE_EVENT(amdgpu_ttm_bo_move, + TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement), + TP_ARGS(bo, new_placement, old_placement), + TP_STRUCT__entry( + __field(struct amdgpu_bo *, bo) + __field(u64, bo_size) + __field(u32, new_placement) + __field(u32, old_placement) + ), + + TP_fast_assign( + __entry->bo = bo; + __entry->bo_size = amdgpu_bo_size(bo); + __entry->new_placement = new_placement; + __entry->old_placement = old_placement; + ), + TP_printk("bo=%p from:%d to %d with size = %Ld", + __entry->bo, __entry->old_placement, + __entry->new_placement, __entry->bo_size) ); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 3b9053a..b7742e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -286,9 +286,10 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, r = amdgpu_copy_buffer(ring, old_start, new_start, new_mem->num_pages * PAGE_SIZE, /* bytes */ bo->resv, &fence); - /* FIXME: handle copy error */ - r = ttm_bo_move_accel_cleanup(bo, fence, - evict, no_wait_gpu, new_mem); + if (r) + return r; + + r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); fence_put(fence); return r; } @@ -396,6 +397,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, return -EINVAL; adev = amdgpu_get_adev(bo->bdev); + + /* remember the eviction */ + if (evict) + atomic64_inc(&adev->num_evictions); + if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { amdgpu_move_null(bo, new_mem); return 0; @@ -429,7 +435,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, if (r) { memcpy: - r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + r = ttm_bo_move_memcpy(bo, evict, interruptible, + no_wait_gpu, new_mem); if (r) { return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index d9c88d13..b11f4e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -40,9 +40,16 @@ #include "uvd/uvd_4_2_d.h" /* 1 second timeout */ -#define UVD_IDLE_TIMEOUT_MS 1000 +#define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000) + +/* Firmware versions for VI */ +#define FW_1_65_10 ((1 << 24) | (65 << 16) | (10 << 8)) +#define FW_1_87_11 ((1 << 24) | (87 << 16) | (11 << 8)) +#define FW_1_87_12 ((1 << 24) | (87 << 16) | (12 << 8)) +#define FW_1_37_15 ((1 << 24) | (37 << 16) | (15 << 8)) + /* Polaris10/11 firmware version */ -#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8)) +#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8)) /* Firmware Names */ #ifdef CONFIG_DRM_AMDGPU_CIK @@ -92,7 +99,6 @@ MODULE_FIRMWARE(FIRMWARE_STONEY); MODULE_FIRMWARE(FIRMWARE_POLARIS10); MODULE_FIRMWARE(FIRMWARE_POLARIS11); -static void amdgpu_uvd_note_usage(struct amdgpu_device *adev); static void amdgpu_uvd_idle_work_handler(struct work_struct *work); int amdgpu_uvd_sw_init(struct amdgpu_device *adev) @@ -246,6 +252,23 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0)) adev->uvd.address_64_bit = true; + switch (adev->asic_type) { + case CHIP_TONGA: + adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10; + break; + case CHIP_CARRIZO: + adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11; + break; + case CHIP_FIJI: + adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12; + break; + case CHIP_STONEY: + adev->uvd.use_ctx_buf = 
adev->uvd.fw_version >= FW_1_37_15; + break; + default: + adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10; + } + return 0; } @@ -346,8 +369,6 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) if (handle != 0 && adev->uvd.filp[i] == filp) { struct fence *fence; - amdgpu_uvd_note_usage(adev); - r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence); if (r) { @@ -438,7 +459,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg, unsigned fs_in_mb = width_in_mb * height_in_mb; unsigned image_size, tmp, min_dpb_size, num_dpb_buffer; - unsigned min_ctx_size = 0; + unsigned min_ctx_size = ~0; image_size = width * height; image_size += image_size / 2; @@ -557,7 +578,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg, /* reference picture buffer */ min_dpb_size = image_size * num_dpb_buffer; - if (adev->asic_type < CHIP_POLARIS10){ + if (!adev->uvd.use_ctx_buf){ /* macroblock context buffer */ min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192; @@ -662,7 +683,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, } DRM_ERROR("No more free UVD handles!\n"); - return -EINVAL; + return -ENOSPC; case 1: /* it's a decode msg, calc buffer sizes */ @@ -913,8 +934,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) return -EINVAL; } - amdgpu_uvd_note_usage(ctx.parser->adev); - return 0; } @@ -968,7 +987,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, if (direct) { r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); - job->fence = f; + job->fence = fence_get(f); if (r) goto err_free; @@ -1106,24 +1125,18 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) if (fences == 0 && handles == 0) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_uvd(adev, false); - /* just work around for uvd clock remain high even - * when uvd dpm disabled on Polaris10 */ - if (adev->asic_type == CHIP_POLARIS10) - amdgpu_asic_set_uvd_clocks(adev, 0, 0); } else { amdgpu_asic_set_uvd_clocks(adev, 0, 0); } } else { - schedule_delayed_work(&adev->uvd.idle_work, - msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS)); + schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); } } -static void amdgpu_uvd_note_usage(struct amdgpu_device *adev) +void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) { + struct amdgpu_device *adev = ring->adev; bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); - set_clocks &= schedule_delayed_work(&adev->uvd.idle_work, - msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS)); if (set_clocks) { if (adev->pm.dpm_enabled) { @@ -1133,3 +1146,48 @@ static void amdgpu_uvd_note_usage(struct amdgpu_device *adev) } } } + +void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) +{ + schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); +} + +/** + * amdgpu_uvd_ring_test_ib - test ib execution + * + * @ring: amdgpu_ring pointer + * + * Test if we can successfully execute an IB + */ +int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) +{ + struct fence *fence; + long r; + + r = amdgpu_uvd_get_create_msg(ring, 1, NULL); + if (r) { + DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); + goto error; + } + + r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); + if (r) { + DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); + goto error; + } + + r = fence_wait_timeout(fence, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out.\n"); + r = -ETIMEDOUT; 
+ } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); + } else { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; + } + +error: + fence_put(fence); + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index 9a3b449..c850009 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -35,5 +35,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx); +void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring); +void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring); +int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 875626a..05865ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -36,7 +36,7 @@ #include "cikd.h" /* 1 second timeout */ -#define VCE_IDLE_TIMEOUT_MS 1000 +#define VCE_IDLE_TIMEOUT msecs_to_jiffies(1000) /* Firmware Names */ #ifdef CONFIG_DRM_AMDGPU_CIK @@ -85,8 +85,6 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) unsigned ucode_version, version_major, version_minor, binary_id; int i, r; - INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler); - switch (adev->asic_type) { #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_BONAIRE: @@ -197,6 +195,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) adev->vce.filp[i] = NULL; } + INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler); + mutex_init(&adev->vce.idle_mutex); + return 0; } @@ -220,6 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) amdgpu_ring_fini(&adev->vce.ring[1]); release_firmware(adev->vce.fw); + mutex_destroy(&adev->vce.idle_mutex); return 0; } @@ -310,37 +312,44 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work) amdgpu_asic_set_vce_clocks(adev, 0, 0); } } else { - schedule_delayed_work(&adev->vce.idle_work, - msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)); + schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT); } } /** - * amdgpu_vce_note_usage - power up VCE + * amdgpu_vce_ring_begin_use - power up VCE * - * @adev: amdgpu_device pointer + * @ring: amdgpu ring * * Make sure VCE is powerd up when we want to use it */ -static void amdgpu_vce_note_usage(struct amdgpu_device *adev) +void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring) { - bool streams_changed = false; - bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work); - set_clocks &= schedule_delayed_work(&adev->vce.idle_work, - msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)); - - if (adev->pm.dpm_enabled) { - /* XXX figure out if the streams changed */ - streams_changed = false; - } + struct amdgpu_device *adev = ring->adev; + bool set_clocks; - if (set_clocks || streams_changed) { + mutex_lock(&adev->vce.idle_mutex); + set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work); + if (set_clocks) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_vce(adev, true); } else { amdgpu_asic_set_vce_clocks(adev, 53300, 40000); } } + mutex_unlock(&adev->vce.idle_mutex); +} + +/** + * amdgpu_vce_ring_end_use - power VCE down + * + * @ring: amdgpu ring + * + * Schedule work to power VCE down again + */ +void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring) +{ + 
schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT); } /** @@ -357,11 +366,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) int i, r; for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { uint32_t handle = atomic_read(&adev->vce.handles[i]); + if (!handle || adev->vce.filp[i] != filp) continue; - amdgpu_vce_note_usage(adev); - r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL); if (r) DRM_ERROR("Error destroying VCE handle (%d)!\n", r); @@ -437,7 +445,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[i] = 0x0; r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); - job->fence = f; + job->fence = fence_get(f); if (r) goto err; @@ -469,7 +477,6 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, struct amdgpu_job *job; struct amdgpu_ib *ib; struct fence *f = NULL; - uint64_t dummy; int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); @@ -477,7 +484,6 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, return r; ib = &job->ibs[0]; - dummy = ib->gpu_addr + 1024; /* stitch together an VCE destroy msg */ ib->length_dw = 0; @@ -485,11 +491,14 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ ib->ptr[ib->length_dw++] = handle; - ib->ptr[ib->length_dw++] = 0x00000014; /* len */ - ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ - ib->ptr[ib->length_dw++] = upper_32_bits(dummy); - ib->ptr[ib->length_dw++] = dummy; - ib->ptr[ib->length_dw++] = 0x00000001; + ib->ptr[ib->length_dw++] = 0x00000020; /* len */ + ib->ptr[ib->length_dw++] = 0x00000002; /* task info */ + ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if no */ + ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */ + ib->ptr[ib->length_dw++] = 0x00000000; + ib->ptr[ib->length_dw++] = 0x00000000; + ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */ + ib->ptr[ib->length_dw++] = 0x00000000; ib->ptr[ib->length_dw++] = 0x00000008; /* len */ ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */ @@ -499,7 +508,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, if (direct) { r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); - job->fence = f; + job->fence = fence_get(f); if (r) goto err; @@ -580,12 +589,10 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, * we we don't have another free session index. 
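A note on the UVD/VCE hunks above: the old note_usage() helpers sprinkled through the command-stream paths are replaced by ring begin_use/end_use callbacks. begin_use cancels the pending idle work and powers the block up only when no such work was pending (i.e. the block had really gone idle), while end_use simply re-arms the idle work; the timeout constants are now expressed in jiffies (VCE_IDLE_TIMEOUT becomes msecs_to_jiffies(1000)). Below is a minimal userspace model of that hand-off, with the delayed work reduced to a "pending" flag and all names hypothetical; the VCE idle_mutex serialization is omitted for brevity.

#include <stdbool.h>
#include <stdio.h>

struct engine {
	bool pending;   /* stands in for a queued delayed work item */
	bool powered;   /* block currently powered up? */
};

/* Mirrors cancel_delayed_work_sync(): returns true if work was pending. */
static bool cancel_idle_work(struct engine *e)
{
	bool was_pending = e->pending;
	e->pending = false;
	return was_pending;
}

/* begin_use: power up only if no idle work was pending. */
static void ring_begin_use(struct engine *e)
{
	if (!cancel_idle_work(e)) {
		e->powered = true;
		printf("power up\n");
	}
}

/* end_use: just re-arm the idle work; the handler powers down later. */
static void ring_end_use(struct engine *e)
{
	e->pending = true;
}

/* idle work handler: fires after the timeout with no new submissions. */
static void idle_work(struct engine *e)
{
	e->pending = false;
	e->powered = false;
	printf("power down\n");
}

int main(void)
{
	struct engine uvd = { false, false };

	ring_begin_use(&uvd);  /* first job: powers up */
	ring_end_use(&uvd);
	ring_begin_use(&uvd);  /* back-to-back job: no power cycle */
	ring_end_use(&uvd);
	idle_work(&uvd);       /* timeout expires: powers down */
	return 0;
}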
*/ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p, - uint32_t handle, bool *allocated) + uint32_t handle, uint32_t *allocated) { unsigned i; - *allocated = false; - /* validate the handle */ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { if (atomic_read(&p->adev->vce.handles[i]) == handle) { @@ -602,7 +609,7 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p, if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) { p->adev->vce.filp[i] = p->filp; p->adev->vce.img_size[i] = 0; - *allocated = true; + *allocated |= 1 << i; return i; } } @@ -622,15 +629,13 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) struct amdgpu_ib *ib = &p->job->ibs[ib_idx]; unsigned fb_idx = 0, bs_idx = 0; int session_idx = -1; - bool destroyed = false; - bool created = false; - bool allocated = false; + uint32_t destroyed = 0; + uint32_t created = 0; + uint32_t allocated = 0; uint32_t tmp, handle = 0; uint32_t *size = &tmp; int i, r = 0, idx = 0; - amdgpu_vce_note_usage(p->adev); - while (idx < ib->length_dw) { uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx); uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1); @@ -641,30 +646,30 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) goto out; } - if (destroyed) { - DRM_ERROR("No other command allowed after destroy!\n"); - r = -EINVAL; - goto out; - } - switch (cmd) { - case 0x00000001: // session + case 0x00000001: /* session */ handle = amdgpu_get_ib_value(p, ib_idx, idx + 2); session_idx = amdgpu_vce_validate_handle(p, handle, &allocated); - if (session_idx < 0) - return session_idx; + if (session_idx < 0) { + r = session_idx; + goto out; + } size = &p->adev->vce.img_size[session_idx]; break; - case 0x00000002: // task info + case 0x00000002: /* task info */ fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6); bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7); break; - case 0x01000001: // create - created = true; - if (!allocated) { + case 0x01000001: /* create */ + created |= 1 << session_idx; + if (destroyed & (1 << session_idx)) { + destroyed &= ~(1 << session_idx); + allocated |= 1 << session_idx; + + } else if (!(allocated & (1 << session_idx))) { DRM_ERROR("Handle already in use!\n"); r = -EINVAL; goto out; @@ -675,16 +680,16 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) 8 * 3 / 2; break; - case 0x04000001: // config extension - case 0x04000002: // pic control - case 0x04000005: // rate control - case 0x04000007: // motion estimation - case 0x04000008: // rdo - case 0x04000009: // vui - case 0x05000002: // auxiliary buffer + case 0x04000001: /* config extension */ + case 0x04000002: /* pic control */ + case 0x04000005: /* rate control */ + case 0x04000007: /* motion estimation */ + case 0x04000008: /* rdo */ + case 0x04000009: /* vui */ + case 0x05000002: /* auxiliary buffer */ break; - case 0x03000001: // encode + case 0x03000001: /* encode */ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9, *size, 0); if (r) @@ -696,18 +701,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) goto out; break; - case 0x02000001: // destroy - destroyed = true; + case 0x02000001: /* destroy */ + destroyed |= 1 << session_idx; break; - case 0x05000001: // context buffer + case 0x05000001: /* context buffer */ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, *size * 2, 0); if (r) goto out; break; - case 0x05000004: // video bitstream buffer + case 0x05000004: /* video bitstream buffer */ tmp = amdgpu_get_ib_value(p, ib_idx, 
idx + 4); r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, tmp, bs_idx); @@ -715,7 +720,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) goto out; break; - case 0x05000005: // feedback buffer + case 0x05000005: /* feedback buffer */ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, 4096, fb_idx); if (r) @@ -737,21 +742,24 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) idx += len / 4; } - if (allocated && !created) { + if (allocated & ~created) { DRM_ERROR("New session without create command!\n"); r = -ENOENT; } out: - if ((!r && destroyed) || (r && allocated)) { - /* - * IB contains a destroy msg or we have allocated an - * handle and got an error, anyway free the handle - */ - for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) - atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0); + if (!r) { + /* No error, free all destroyed handle slots */ + tmp = destroyed; + } else { + /* Error during parsing, free all allocated handle slots */ + tmp = allocated; } + for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) + if (tmp & (1 << i)) + atomic_set(&p->adev->vce.handles[i], 0); + return r; } @@ -837,10 +845,10 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) * @ring: the engine to test on * */ -int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring) +int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct fence *fence = NULL; - int r; + long r; /* skip vce ring1 ib test for now, since it's not reliable */ if (ring == &ring->adev->vce.ring[1]) @@ -848,21 +856,25 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring) r = amdgpu_vce_get_create_msg(ring, 1, NULL); if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); + DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); goto error; } r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence); if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); goto error; } - r = fence_wait(fence, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(fence, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out.\n"); + r = -ETIMEDOUT; + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); } else { DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } error: fence_put(fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index f40cf76..63f83d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -39,6 +39,8 @@ void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags); int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring); -int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring); +int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout); +void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring); +void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 9f36ed3..8e642fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -25,6 +25,7 @@ * Alex Deucher * Jerome Glisse */ +#include <linux/fence-array.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" @@ -114,16 +115,26 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, 
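The amdgpu_vce_ring_parse_cs() rework above stops tracking created/destroyed/allocated state as single booleans and records one bit per session slot instead, so a command stream touching several sessions is cleaned up precisely: on success only the destroyed slots are released, on a parse error only the slots this stream allocated are. A compact standalone model of that bookkeeping follows; the slot count, handle value and helper are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 16

static uint32_t handles[MAX_HANDLES];   /* 0 == free slot */

/* Find (or allocate) the slot for a handle; mark newly taken slots
 * in *allocated, as the parser does. Returns -1 if no slot is free. */
static int validate_handle(uint32_t handle, uint32_t *allocated)
{
	int i;

	for (i = 0; i < MAX_HANDLES; i++)
		if (handles[i] == handle)
			return i;               /* already known */

	for (i = 0; i < MAX_HANDLES; i++)
		if (handles[i] == 0) {
			handles[i] = handle;
			*allocated |= 1u << i;
			return i;
		}
	return -1;                              /* no free slot */
}

int main(void)
{
	uint32_t allocated = 0, created = 0, destroyed = 0;
	int s, err = 0;

	s = validate_handle(0x1234, &allocated);
	created |= 1u << s;
	destroyed |= 1u << s;                   /* stream also destroys it */

	if (allocated & ~created)
		err = -1;                       /* session without create */

	/* On success free the destroyed slots, on error the allocated ones. */
	uint32_t to_free = err ? allocated : destroyed;
	for (s = 0; s < MAX_HANDLES; s++)
		if (to_free & (1u << s))
			handles[s] = 0;

	printf("slot 0 handle after cleanup: 0x%x\n", handles[0]);
	return 0;
}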
/** * amdgpu_vm_get_bos - add the vm BOs to a duplicates list * + * @adev: amdgpu device pointer * @vm: vm providing the BOs * @duplicates: head of duplicates list * * Add the page directory to the BO duplicates list * for command submission. */ -void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates) +void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct list_head *duplicates) { + uint64_t num_evictions; unsigned i; + /* We only need to validate the page tables + * if they aren't already valid. + */ + num_evictions = atomic64_read(&adev->num_evictions); + if (num_evictions == vm->last_eviction_counter) + return; + /* add the vm page table to the list */ for (i = 0; i <= vm->max_pde_used; ++i) { struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry; @@ -162,6 +173,13 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, spin_unlock(&glob->lru_lock); } +static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev, + struct amdgpu_vm_id *id) +{ + return id->current_gpu_reset_count != + atomic_read(&adev->gpu_reset_counter) ? true : false; +} + /** * amdgpu_vm_grab_id - allocate the next free VMID * @@ -174,18 +192,67 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, */ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_sync *sync, struct fence *fence, - unsigned *vm_id, uint64_t *vm_pd_addr) + struct amdgpu_job *job) { - uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); struct amdgpu_device *adev = ring->adev; + uint64_t fence_context = adev->fence_context + ring->idx; struct fence *updates = sync->last_vm_update; - struct amdgpu_vm_id *id; - unsigned i = ring->idx; - int r; + struct amdgpu_vm_id *id, *idle; + struct fence **fences; + unsigned i; + int r = 0; + + fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids, + GFP_KERNEL); + if (!fences) + return -ENOMEM; mutex_lock(&adev->vm_manager.lock); + /* Check if we have an idle VMID */ + i = 0; + list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) { + fences[i] = amdgpu_sync_peek_fence(&idle->active, ring); + if (!fences[i]) + break; + ++i; + } + + /* If we can't find a idle VMID to use, wait till one becomes available */ + if (&idle->list == &adev->vm_manager.ids_lru) { + u64 fence_context = adev->vm_manager.fence_context + ring->idx; + unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; + struct fence_array *array; + unsigned j; + + for (j = 0; j < i; ++j) + fence_get(fences[j]); + + array = fence_array_create(i, fences, fence_context, + seqno, true); + if (!array) { + for (j = 0; j < i; ++j) + fence_put(fences[j]); + kfree(fences); + r = -ENOMEM; + goto error; + } + + + r = amdgpu_sync_fence(ring->adev, sync, &array->base); + fence_put(&array->base); + if (r) + goto error; + + mutex_unlock(&adev->vm_manager.lock); + return 0; + + } + kfree(fences); + + job->vm_needs_flush = true; /* Check if we can use a VMID already assigned to this VM */ + i = ring->idx; do { struct fence *flushed; @@ -196,67 +263,52 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, /* Check all the prerequisites to using this VMID */ if (!id) continue; + if (amdgpu_vm_is_gpu_reset(adev, id)) + continue; if (atomic64_read(&id->owner) != vm->client_id) continue; - if (pd_addr != id->pd_gpu_addr) + if (job->vm_pd_addr != id->pd_gpu_addr) continue; - if (id->last_user != ring && - (!id->last_flush || !fence_is_signaled(id->last_flush))) + if (!id->last_flush) continue; - flushed = 
id->flushed_updates; - if (updates && (!flushed || fence_is_later(updates, flushed))) + if (id->last_flush->context != fence_context && + !fence_is_signaled(id->last_flush)) continue; - /* Good we can use this VMID */ - if (id->last_user == ring) { - r = amdgpu_sync_fence(ring->adev, sync, - id->first); - if (r) - goto error; - } + flushed = id->flushed_updates; + if (updates && + (!flushed || fence_is_later(updates, flushed))) + continue; - /* And remember this submission as user of the VMID */ + /* Good we can use this VMID. Remember this submission as + * user of the VMID. + */ r = amdgpu_sync_fence(ring->adev, &id->active, fence); if (r) goto error; + id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); list_move_tail(&id->list, &adev->vm_manager.ids_lru); vm->ids[ring->idx] = id; - *vm_id = id - adev->vm_manager.ids; - *vm_pd_addr = AMDGPU_VM_NO_FLUSH; - trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr); + job->vm_id = id - adev->vm_manager.ids; + job->vm_needs_flush = false; + trace_amdgpu_vm_grab_id(vm, ring->idx, job); mutex_unlock(&adev->vm_manager.lock); return 0; } while (i != ring->idx); - id = list_first_entry(&adev->vm_manager.ids_lru, - struct amdgpu_vm_id, - list); - - if (!amdgpu_sync_is_idle(&id->active)) { - struct list_head *head = &adev->vm_manager.ids_lru; - struct amdgpu_vm_id *tmp; + /* Still no ID to use? Then use the idle one found earlier */ + id = idle; - list_for_each_entry_safe(id, tmp, &adev->vm_manager.ids_lru, - list) { - if (amdgpu_sync_is_idle(&id->active)) { - list_move(&id->list, head); - head = &id->list; - } - } - id = list_first_entry(&adev->vm_manager.ids_lru, - struct amdgpu_vm_id, - list); - } - - r = amdgpu_sync_cycle_fences(sync, &id->active, fence); + /* Remember this submission as user of the VMID */ + r = amdgpu_sync_fence(ring->adev, &id->active, fence); if (r) goto error; @@ -269,22 +321,46 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, fence_put(id->flushed_updates); id->flushed_updates = fence_get(updates); - id->pd_gpu_addr = pd_addr; - + id->pd_gpu_addr = job->vm_pd_addr; + id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); list_move_tail(&id->list, &adev->vm_manager.ids_lru); - id->last_user = ring; atomic64_set(&id->owner, vm->client_id); vm->ids[ring->idx] = id; - *vm_id = id - adev->vm_manager.ids; - *vm_pd_addr = pd_addr; - trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr); + job->vm_id = id - adev->vm_manager.ids; + trace_amdgpu_vm_grab_id(vm, ring->idx, job); error: mutex_unlock(&adev->vm_manager.lock); return r; } +static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + const struct amdgpu_ip_block_version *ip_block; + + if (ring->type != AMDGPU_RING_TYPE_COMPUTE) + /* only compute rings */ + return false; + + ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX); + if (!ip_block) + return false; + + if (ip_block->major <= 7) { + /* gfx7 has no workaround */ + return true; + } else if (ip_block->major == 8) { + if (adev->gfx.mec_fw_version >= 673) + /* gfx8 is fixed in MEC firmware 673 */ + return false; + else + return true; + } + return false; +} + /** * amdgpu_vm_flush - hardware flush the vm * @@ -294,59 +370,52 @@ error: * * Emit a VM flush when it is necessary. 
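The new amdgpu_vm_ring_has_compute_vm_bug() above narrows the old rule of always forcing a pipeline sync on compute rings: as the amdgpu_vm_flush() hunk just below shows, the sync is only forced on compute rings whose GFX block still needs it (all of gfx7, and gfx8 only below MEC firmware 673). A standalone restatement of that predicate, with the ring and IP-block data reduced to plain parameters for illustration:

#include <stdbool.h>
#include <stdio.h>

static bool has_compute_vm_bug(bool is_compute, int gfx_major,
			       unsigned mec_fw_version)
{
	if (!is_compute)
		return false;                 /* only compute rings are affected */
	if (gfx_major <= 7)
		return true;                  /* gfx7: no firmware workaround */
	if (gfx_major == 8)
		return mec_fw_version < 673;  /* gfx8: fixed in MEC fw 673 */
	return false;
}

int main(void)
{
	printf("%d %d %d\n",
	       has_compute_vm_bug(true, 7, 0),    /* 1 */
	       has_compute_vm_bug(true, 8, 660),  /* 1 */
	       has_compute_vm_bug(true, 8, 700)); /* 0 */
	return 0;
}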
*/ -int amdgpu_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr, - uint32_t gds_base, uint32_t gds_size, - uint32_t gws_base, uint32_t gws_size, - uint32_t oa_base, uint32_t oa_size) +int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id]; + struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id]; bool gds_switch_needed = ring->funcs->emit_gds_switch && ( - id->gds_base != gds_base || - id->gds_size != gds_size || - id->gws_base != gws_base || - id->gws_size != gws_size || - id->oa_base != oa_base || - id->oa_size != oa_size); + id->gds_base != job->gds_base || + id->gds_size != job->gds_size || + id->gws_base != job->gws_base || + id->gws_size != job->gws_size || + id->oa_base != job->oa_base || + id->oa_size != job->oa_size); int r; if (ring->funcs->emit_pipeline_sync && ( - pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || - ring->type == AMDGPU_RING_TYPE_COMPUTE)) + job->vm_needs_flush || gds_switch_needed || + amdgpu_vm_ring_has_compute_vm_bug(ring))) amdgpu_ring_emit_pipeline_sync(ring); - if (ring->funcs->emit_vm_flush && - pd_addr != AMDGPU_VM_NO_FLUSH) { + if (ring->funcs->emit_vm_flush && (job->vm_needs_flush || + amdgpu_vm_is_gpu_reset(adev, id))) { struct fence *fence; - trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id); - amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr); + trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id); + amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr); + + r = amdgpu_fence_emit(ring, &fence); + if (r) + return r; mutex_lock(&adev->vm_manager.lock); - if ((id->pd_gpu_addr == pd_addr) && (id->last_user == ring)) { - r = amdgpu_fence_emit(ring, &fence); - if (r) { - mutex_unlock(&adev->vm_manager.lock); - return r; - } - fence_put(id->last_flush); - id->last_flush = fence; - } + fence_put(id->last_flush); + id->last_flush = fence; mutex_unlock(&adev->vm_manager.lock); } if (gds_switch_needed) { - id->gds_base = gds_base; - id->gds_size = gds_size; - id->gws_base = gws_base; - id->gws_size = gws_size; - id->oa_base = oa_base; - id->oa_size = oa_size; - amdgpu_ring_emit_gds_switch(ring, vm_id, - gds_base, gds_size, - gws_base, gws_size, - oa_base, oa_size); + id->gds_base = job->gds_base; + id->gds_size = job->gds_size; + id->gws_base = job->gws_base; + id->gws_size = job->gws_size; + id->oa_base = job->oa_base; + id->oa_size = job->oa_size; + amdgpu_ring_emit_gds_switch(ring, job->vm_id, + job->gds_base, job->gds_size, + job->gws_base, job->gws_size, + job->oa_base, job->oa_size); } return 0; @@ -723,7 +792,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, * @vm: requested vm * @start: start of GPU address range * @end: end of GPU address range - * @dst: destination address to map to + * @dst: destination address to map to, the next dst inside the function * @flags: mapping flags * * Update the page tables in the range @start - @end. 
@@ -737,49 +806,75 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, { const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; - uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0; - uint64_t addr; + uint64_t cur_pe_start, cur_pe_end, cur_dst; + uint64_t addr; /* next GPU address to be updated */ + uint64_t pt_idx; + struct amdgpu_bo *pt; + unsigned nptes; /* next number of ptes to be updated */ + uint64_t next_pe_start; + + /* initialize the variables */ + addr = start; + pt_idx = addr >> amdgpu_vm_block_size; + pt = vm->page_tables[pt_idx].entry.robj; + + if ((addr & ~mask) == (end & ~mask)) + nptes = end - addr; + else + nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); + + cur_pe_start = amdgpu_bo_gpu_offset(pt); + cur_pe_start += (addr & mask) * 8; + cur_pe_end = cur_pe_start + 8 * nptes; + cur_dst = dst; + + /* for next ptb*/ + addr += nptes; + dst += nptes * AMDGPU_GPU_PAGE_SIZE; /* walk over the address space and update the page tables */ - for (addr = start; addr < end; ) { - uint64_t pt_idx = addr >> amdgpu_vm_block_size; - struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj; - unsigned nptes; - uint64_t pe_start; + while (addr < end) { + pt_idx = addr >> amdgpu_vm_block_size; + pt = vm->page_tables[pt_idx].entry.robj; if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); - pe_start = amdgpu_bo_gpu_offset(pt); - pe_start += (addr & mask) * 8; - - if (last_pe_end != pe_start) { + next_pe_start = amdgpu_bo_gpu_offset(pt); + next_pe_start += (addr & mask) * 8; + if (cur_pe_end == next_pe_start) { + /* The next ptb is consecutive to current ptb. + * Don't call amdgpu_vm_frag_ptes now. + * Will update two ptbs together in future. + */ + cur_pe_end += 8 * nptes; + } else { amdgpu_vm_frag_ptes(adev, vm_update_params, - last_pe_start, last_pe_end, - last_dst, flags); + cur_pe_start, cur_pe_end, + cur_dst, flags); - last_pe_start = pe_start; - last_pe_end = pe_start + 8 * nptes; - last_dst = dst; - } else { - last_pe_end += 8 * nptes; + cur_pe_start = next_pe_start; + cur_pe_end = next_pe_start + 8 * nptes; + cur_dst = dst; } + /* for next ptb*/ addr += nptes; dst += nptes * AMDGPU_GPU_PAGE_SIZE; } - amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start, - last_pe_end, last_dst, flags); + amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start, + cur_pe_end, cur_dst, flags); } /** * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table * * @adev: amdgpu_device pointer + * @exclusive: fence we need to sync to * @src: address where to copy page table entries from * @pages_addr: DMA addresses to use for mapping * @vm: requested vm @@ -793,6 +888,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, * Returns 0 for success, -EINVAL for failure. 
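The amdgpu_vm_update_ptes() rewrite above keeps a current [cur_pe_start, cur_pe_end) window and only calls amdgpu_vm_frag_ptes() when the next page table's entries are not contiguous with it, so back-to-back page tables are written with one larger update instead of one per table. The same merging logic on plain integer ranges, with the GPU specifics stripped out (a sketch, values chosen for the example):

#include <stdio.h>

static void flush(unsigned long start, unsigned long end)
{
	printf("update [%lu, %lu)\n", start, end);
}

int main(void)
{
	/* Start address of each chunk; every chunk covers 8 entries here. */
	unsigned long starts[] = { 0, 8, 16, 40, 48 };
	unsigned long nent = 8;
	unsigned long cur_start = starts[0], cur_end = starts[0] + nent;
	unsigned i;

	for (i = 1; i < sizeof(starts) / sizeof(starts[0]); i++) {
		unsigned long next_start = starts[i];

		if (cur_end == next_start) {
			/* contiguous: grow the current window */
			cur_end += nent;
		} else {
			/* gap: flush what we have and start a new window */
			flush(cur_start, cur_end);
			cur_start = next_start;
			cur_end = next_start + nent;
		}
	}
	flush(cur_start, cur_end);   /* final window */
	return 0;                    /* prints [0, 24) and [40, 56) */
}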
*/ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, + struct fence *exclusive, uint64_t src, dma_addr_t *pages_addr, struct amdgpu_vm *vm, @@ -853,6 +949,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, vm_update_params.ib = &job->ibs[0]; + r = amdgpu_sync_fence(adev, &job->sync, exclusive); + if (r) + goto error_free; + r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv, owner); if (r) @@ -889,6 +989,7 @@ error_free: * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks * * @adev: amdgpu_device pointer + * @exclusive: fence we need to sync to * @gtt_flags: flags as they are used for GTT * @pages_addr: DMA addresses to use for mapping * @vm: requested vm @@ -902,6 +1003,7 @@ error_free: * Returns 0 for success, -EINVAL for failure. */ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, + struct fence *exclusive, uint32_t gtt_flags, dma_addr_t *pages_addr, struct amdgpu_vm *vm, @@ -932,7 +1034,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, addr += mapping->offset; if (!pages_addr || src) - return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm, + return amdgpu_vm_bo_update_mapping(adev, exclusive, + src, pages_addr, vm, start, mapping->it.last, flags, addr, fence); @@ -940,7 +1043,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, uint64_t last; last = min((uint64_t)mapping->it.last, start + max_size - 1); - r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm, + r = amdgpu_vm_bo_update_mapping(adev, exclusive, + src, pages_addr, vm, start, last, flags, addr, fence); if (r) @@ -973,6 +1077,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; uint32_t gtt_flags, flags; + struct fence *exclusive; uint64_t addr; int r; @@ -994,8 +1099,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, default: break; } + + exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv); } else { addr = 0; + exclusive = NULL; } flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); @@ -1007,7 +1115,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, spin_unlock(&vm->status_lock); list_for_each_entry(mapping, &bo_va->invalids, list) { - r = amdgpu_vm_bo_split_mapping(adev, gtt_flags, pages_addr, vm, + r = amdgpu_vm_bo_split_mapping(adev, exclusive, + gtt_flags, pages_addr, vm, mapping, flags, addr, &bo_va->last_pt_update); if (r) @@ -1054,7 +1163,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping, list); list_del(&mapping->list); - r = amdgpu_vm_bo_split_mapping(adev, 0, NULL, vm, mapping, + r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping, 0, 0, NULL); kfree(mapping); if (r) @@ -1445,6 +1554,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_bo_unreserve(vm->page_directory); if (r) goto error_free_page_directory; + vm->last_eviction_counter = atomic64_read(&adev->num_evictions); return 0; @@ -1516,6 +1626,10 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) &adev->vm_manager.ids_lru); } + adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS); + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) + adev->vm_manager.seqno[i] = 0; + atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); atomic64_set(&adev->vm_manager.client_counter, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index 48b6bd6..c32eca2 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -98,6 +98,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: if (dig->backlight_level == 0) amdgpu_atombios_encoder_setup_dig_transmitter(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 5ec1f1e..e2f0e5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -50,7 +50,9 @@ #include "gmc/gmc_7_1_sh_mask.h" MODULE_FIRMWARE("radeon/bonaire_smc.bin"); +MODULE_FIRMWARE("radeon/bonaire_k_smc.bin"); MODULE_FIRMWARE("radeon/hawaii_smc.bin"); +MODULE_FIRMWARE("radeon/hawaii_k_smc.bin"); #define MC_CG_ARB_FREQ_F0 0x0a #define MC_CG_ARB_FREQ_F1 0x0b @@ -84,12 +86,14 @@ static const struct ci_pt_defaults defaults_bonaire_xt = { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } }; +#if 0 static const struct ci_pt_defaults defaults_bonaire_pro = { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062, { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F }, { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB } }; +#endif static const struct ci_pt_defaults defaults_saturn_xt = { @@ -98,12 +102,14 @@ static const struct ci_pt_defaults defaults_saturn_xt = { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 } }; +#if 0 static const struct ci_pt_defaults defaults_saturn_pro = { 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000, { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A }, { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 } }; +#endif static const struct ci_pt_config_reg didt_config_ci[] = { @@ -736,19 +742,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable) if (pi->caps_sq_ramping || pi->caps_db_ramping || pi->caps_td_ramping || pi->caps_tcp_ramping) { - gfx_v7_0_enter_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->enter_safe_mode(adev); if (enable) { ret = ci_program_pt_config_registers(adev, didt_config_ci); if (ret) { - gfx_v7_0_exit_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); return ret; } } ci_do_enable_didt(adev, enable); - gfx_v7_0_exit_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); } return 0; @@ -3030,7 +3036,7 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev, if (pi->mclk_stutter_mode_threshold && (memory_clock <= pi->mclk_stutter_mode_threshold) && - (pi->uvd_enabled == false) && + (!pi->uvd_enabled) && (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) && (adev->pm.dpm.new_active_crtc_count <= 2)) memory_level->StutterEnable = true; @@ -3636,6 +3642,10 @@ static int ci_setup_default_dpm_tables(struct amdgpu_device *adev) ci_setup_default_pcie_tables(adev); + /* save a copy of the default DPM table */ + memcpy(&(pi->golden_dpm_table), &(pi->dpm_table), + sizeof(struct ci_dpm_table)); + return 0; } @@ -5754,10 +5764,18 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_BONAIRE: - chip_name = "bonaire"; + if 
((adev->pdev->revision == 0x80) || + (adev->pdev->revision == 0x81) || + (adev->pdev->device == 0x665f)) + chip_name = "bonaire_k"; + else + chip_name = "bonaire"; break; case CHIP_HAWAII: - chip_name = "hawaii"; + if (adev->pdev->revision == 0x80) + chip_name = "hawaii_k"; + else + chip_name = "hawaii"; break; case CHIP_KAVERI: case CHIP_KABINI: @@ -6404,6 +6422,186 @@ static int ci_dpm_set_powergating_state(void *handle, return 0; } +static int ci_dpm_print_clock_levels(struct amdgpu_device *adev, + enum pp_clock_type type, char *buf) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table; + struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table; + struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table; + + int i, now, size = 0; + uint32_t clock, pcie_speed; + + switch (type) { + case PP_SCLK: + amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency); + clock = RREG32(mmSMC_MSG_ARG_0); + + for (i = 0; i < sclk_table->count; i++) { + if (clock > sclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < sclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, sclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_MCLK: + amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency); + clock = RREG32(mmSMC_MSG_ARG_0); + + for (i = 0; i < mclk_table->count; i++) { + if (clock > mclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < mclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, mclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_PCIE: + pcie_speed = ci_get_current_pcie_speed(adev); + for (i = 0; i < pcie_table->count; i++) { + if (pcie_speed != pcie_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < pcie_table->count; i++) + size += sprintf(buf + size, "%d: %s %s\n", i, + (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" : + (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : + (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", + (i == now) ? 
"*" : ""); + break; + default: + break; + } + + return size; +} + +static int ci_dpm_force_clock_level(struct amdgpu_device *adev, + enum pp_clock_type type, uint32_t mask) +{ + struct ci_power_info *pi = ci_get_pi(adev); + + if (adev->pm.dpm.forced_level + != AMDGPU_DPM_FORCED_LEVEL_MANUAL) + return -EINVAL; + + switch (type) { + case PP_SCLK: + if (!pi->sclk_dpm_key_disabled) + amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); + break; + + case PP_MCLK: + if (!pi->mclk_dpm_key_disabled) + amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); + break; + + case PP_PCIE: + { + uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask; + uint32_t level = 0; + + while (tmp >>= 1) + level++; + + if (!pi->pcie_dpm_key_disabled) + amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_PCIeDPM_ForceLevel, + level); + break; + } + default: + break; + } + + return 0; +} + +static int ci_dpm_get_sclk_od(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table); + struct ci_single_dpm_table *golden_sclk_table = + &(pi->golden_dpm_table.sclk_table); + int value; + + value = (sclk_table->dpm_levels[sclk_table->count - 1].value - + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * + 100 / + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return value; +} + +static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps); + struct ci_single_dpm_table *golden_sclk_table = + &(pi->golden_dpm_table.sclk_table); + + if (value > 20) + value = 20; + + ps->performance_levels[ps->performance_level_count - 1].sclk = + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * + value / 100 + + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return 0; +} + +static int ci_dpm_get_mclk_od(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table); + struct ci_single_dpm_table *golden_mclk_table = + &(pi->golden_dpm_table.mclk_table); + int value; + + value = (mclk_table->dpm_levels[mclk_table->count - 1].value - + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * + 100 / + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return value; +} + +static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps); + struct ci_single_dpm_table *golden_mclk_table = + &(pi->golden_dpm_table.mclk_table); + + if (value > 20) + value = 20; + + ps->performance_levels[ps->performance_level_count - 1].mclk = + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * + value / 100 + + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return 0; +} + const struct amd_ip_funcs ci_dpm_ip_funcs = { .name = "ci_dpm", .early_init = ci_dpm_early_init, @@ -6438,6 +6636,12 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = { .get_fan_control_mode = &ci_dpm_get_fan_control_mode, .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent, .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent, + 
.print_clock_levels = ci_dpm_print_clock_levels, + .force_clock_level = ci_dpm_force_clock_level, + .get_sclk_od = ci_dpm_get_sclk_od, + .set_sclk_od = ci_dpm_set_sclk_od, + .get_mclk_od = ci_dpm_get_mclk_od, + .set_mclk_od = ci_dpm_set_mclk_od, }; static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h index faccc30..91be299 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h @@ -193,6 +193,7 @@ struct ci_pt_defaults { struct ci_power_info { struct ci_dpm_table dpm_table; + struct ci_dpm_table golden_dpm_table; u32 voltage_control; u32 mvdd_control; u32 vddci_control; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 9104318..4efc901 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -879,7 +879,7 @@ static void cik_vga_set_state(struct amdgpu_device *adev, bool state) uint32_t tmp; tmp = RREG32(mmCONFIG_CNTL); - if (state == false) + if (!state) tmp |= CONFIG_CNTL__VGA_DIS_MASK; else tmp &= ~CONFIG_CNTL__VGA_DIS_MASK; @@ -1035,12 +1035,12 @@ static uint32_t cik_read_indexed_register(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - gfx_v7_0_select_se_sh(adev, se_num, sh_num); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); return val; } @@ -1158,10 +1158,11 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev, WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute); } -static void cik_gpu_pci_config_reset(struct amdgpu_device *adev) +static int cik_gpu_pci_config_reset(struct amdgpu_device *adev) { struct kv_reset_save_regs kv_save = { 0 }; u32 i; + int r = -EINVAL; dev_info(adev->dev, "GPU pci config reset\n"); @@ -1177,14 +1178,20 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev) /* wait for asic to come out of reset */ for (i = 0; i < adev->usec_timeout; i++) { - if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) + if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) { + /* enable BM */ + pci_set_master(adev->pdev); + r = 0; break; + } udelay(1); } /* does asic init need to be run first??? 
*/ if (adev->flags & AMD_IS_APU) kv_restore_regs_for_reset(adev, &kv_save); + + return r; } static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung) @@ -1210,13 +1217,14 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu */ static int cik_asic_reset(struct amdgpu_device *adev) { + int r; cik_set_bios_scratch_engine_hung(adev, true); - cik_gpu_pci_config_reset(adev); + r = cik_gpu_pci_config_reset(adev); cik_set_bios_scratch_engine_hung(adev, false); - return 0; + return r; } static int cik_set_uvd_clock(struct amdgpu_device *adev, u32 clock, @@ -2014,9 +2022,6 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = .set_uvd_clocks = &cik_set_uvd_clocks, .set_vce_clocks = &cik_set_vce_clocks, .get_virtual_caps = &cik_get_virtual_caps, - /* these should be moved to their own ip modules */ - .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, - .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle, }; static int cik_common_early_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 9dc4e24..ee64669 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -224,17 +224,6 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, unsigned vm_id, bool ctx_switch) { u32 extra_bits = vm_id & 0xf; - u32 next_rptr = ring->wptr + 5; - - while ((next_rptr & 7) != 4) - next_rptr++; - - next_rptr += 4; - amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 1); /* number of DWs to follow */ - amdgpu_ring_write(ring, next_rptr); /* IB packet must end on a 8 DW boundary */ cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8); @@ -365,7 +354,7 @@ static void cik_sdma_enable(struct amdgpu_device *adev, bool enable) u32 me_cntl; int i; - if (enable == false) { + if (!enable) { cik_sdma_gfx_stop(adev); cik_sdma_rlc_stop(adev); } @@ -628,20 +617,19 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring) * Test a simple IB in the DMA ring (CIK). * Returns 0 on success, error on failure. 
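The ci_dpm overdrive callbacks added a few hunks above express overclocking as a percentage over the saved golden DPM table: get_sclk_od()/get_mclk_od() report how far the current top level sits above the golden top level, and the set variants clamp the request to 20% before rescaling the top performance level. The arithmetic in isolation, with made-up clock values:

#include <stdio.h>

int main(void)
{
	unsigned golden_top = 100000;   /* default top sclk, 10 kHz units */
	unsigned current_top = 105000;  /* after a previous overdrive */
	unsigned request = 25;          /* user asks for +25% */

	/* get_sclk_od: percent above the golden top level */
	int od = (current_top - golden_top) * 100 / golden_top;
	printf("current overdrive: %d%%\n", od);          /* prints 5% */

	/* set_sclk_od: clamp to 20% and rescale the top level */
	if (request > 20)
		request = 20;
	unsigned new_top = golden_top * request / 100 + golden_top;
	printf("new top sclk: %u\n", new_top);            /* prints 120000 */

	return 0;
}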
*/ -static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) +static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; - unsigned i; unsigned index; - int r; u32 tmp = 0; u64 gpu_addr; + long r; r = amdgpu_wb_get(adev, &index); if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); return r; } @@ -651,11 +639,12 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err0; } - ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, + SDMA_WRITE_SUB_OPCODE_LINEAR, 0); ib.ptr[1] = lower_32_bits(gpu_addr); ib.ptr[2] = upper_32_bits(gpu_addr); ib.ptr[3] = 1; @@ -665,28 +654,25 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) if (r) goto err1; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out\n"); + r = -ETIMEDOUT; goto err1; - } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err1; + } + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; } err1: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err0: diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 933e425..2a11413 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -425,7 +425,7 @@ static int cz_dpm_init(struct amdgpu_device *adev) pi->mgcg_cgtt_local1 = 0x0; pi->clock_slow_down_step = 25000; pi->skip_clock_slow_down = 1; - pi->enable_nb_ps_policy = 0; + pi->enable_nb_ps_policy = false; pi->caps_power_containment = true; pi->caps_cac = true; pi->didt_enabled = false; @@ -2219,6 +2219,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) } } } else { /*pi->caps_vce_pg*/ + pi->vce_power_gated = gate; cz_update_vce_dpm(adev); cz_enable_vce_dpm(adev, !gate); } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 8227344..c1b04e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2667,19 +2667,21 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc) } } -static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int end = (start + size > 256) ? 
256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { amdgpu_crtc->lut_r[i] = red[i] >> 6; amdgpu_crtc->lut_g[i] = green[i] >> 6; amdgpu_crtc->lut_b[i] = blue[i] >> 6; } dce_v10_0_crtc_load_lut(crtc); + + return 0; } static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc) @@ -2717,13 +2719,13 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); amdgpu_irq_update(adev, &adev->pageflip_irq, type); - drm_vblank_on(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_on(crtc); dce_v10_0_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - drm_vblank_off(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (amdgpu_crtc->enabled) { dce_v10_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); @@ -3372,7 +3374,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index af26ec0..d4bf133 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -307,11 +307,10 @@ static void dce_v11_0_page_flip(struct amdgpu_device *adev, struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; u32 tmp; - /* flip at hsync for async, default is vsync */ - /* use UPDATE_IMMEDIATE_EN instead for async? */ + /* flip immediate for async, default is vsync */ tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, - GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0); + GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0); WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); /* update the scanout addresses */ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, @@ -2678,19 +2677,21 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) } } -static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int end = (start + size > 256) ? 
256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { amdgpu_crtc->lut_r[i] = red[i] >> 6; amdgpu_crtc->lut_g[i] = green[i] >> 6; amdgpu_crtc->lut_b[i] = blue[i] >> 6; } dce_v11_0_crtc_load_lut(crtc); + + return 0; } static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc) @@ -2728,13 +2729,13 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); amdgpu_irq_update(adev, &adev->pageflip_irq, type); - drm_vblank_on(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_on(crtc); dce_v11_0_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - drm_vblank_off(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (amdgpu_crtc->enabled) { dce_v11_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); @@ -3433,7 +3434,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 3fb65e4..4fdfab1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -526,36 +526,16 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev, crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), CRTC_CONTROL, CRTC_MASTER_EN); if (crtc_enabled) { -#if 0 - u32 frame_count; - int j; - +#if 1 save->crtc_enabled[i] = true; tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { - amdgpu_display_vblank_wait(adev, i); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + /*it is correct only for RGB ; black is 0*/ + WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0); tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - } - /* wait for the next frame */ - frame_count = amdgpu_display_vblank_get_counter(adev, i); - for (j = 0; j < adev->usec_timeout; j++) { - if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) - break; - udelay(1); - } - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) { - tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); - WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1); - WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); } + mdelay(20); #else /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); @@ -575,55 +555,22 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev, static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev, struct amdgpu_mode_mc_save *save) { - u32 tmp, frame_count; - int i, j; + u32 tmp; + int i; /* update crtc base addresses */ for (i = 0; i < adev->mode_info.num_crtc; i++) { WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], upper_32_bits(adev->mc.vram_start)); - 
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], (u32)adev->mc.vram_start); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); if (save->crtc_enabled[i]) { - tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3); - WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) { - tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); - WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0); - WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); - } - for (j = 0; j < adev->usec_timeout; j++) { - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0) - break; - udelay(1); - } tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - /* wait for the next frame */ - frame_count = amdgpu_display_vblank_get_counter(adev, i); - for (j = 0; j < adev->usec_timeout; j++) { - if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) - break; - udelay(1); - } } + mdelay(20); } WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); @@ -2574,19 +2521,21 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc) } } -static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int end = (start + size > 256) ? 
256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { amdgpu_crtc->lut_r[i] = red[i] >> 6; amdgpu_crtc->lut_g[i] = green[i] >> 6; amdgpu_crtc->lut_b[i] = blue[i] >> 6; } dce_v8_0_crtc_load_lut(crtc); + + return 0; } static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc) @@ -2624,13 +2573,13 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); amdgpu_irq_update(adev, &adev->pageflip_irq, type); - drm_vblank_on(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_on(crtc); dce_v8_0_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - drm_vblank_off(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (amdgpu_crtc->enabled) { dce_v8_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); @@ -3376,7 +3325,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c index b336c91..b3e19ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c @@ -173,7 +173,7 @@ static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) { if (!fiji_is_smc_ram_running(adev)) { - return -EINVAL;; + return -EINVAL; } if (wait_smu_response(adev)) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index fc8ff4d..d869d05 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1583,9 +1583,15 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev) * registers are instanced per SE or SH. 0xffffffff means * broadcast to all SEs or SHs (CIK). 
*/ -void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) +static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, + u32 se_num, u32 sh_num, u32 instance) { - u32 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK; + u32 data; + + if (instance == 0xffffffff) + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | @@ -1659,13 +1665,13 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v7_0_select_se_sh(adev, i, j); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); data = gfx_v7_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; @@ -1746,7 +1752,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) * making sure that the following register writes will be broadcasted * to all the shaders */ - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ @@ -2050,17 +2056,6 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, unsigned vm_id, bool ctx_switch) { u32 header, control = 0; - u32 next_rptr = ring->wptr + 5; - - if (ctx_switch) - next_rptr += 2; - - next_rptr += 4; - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, next_rptr); /* insert SWITCH_BUFFER packet before first IB in the ring frame */ if (ctx_switch) { @@ -2089,22 +2084,9 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, struct amdgpu_ib *ib, unsigned vm_id, bool ctx_switch) { - u32 header, control = 0; - u32 next_rptr = ring->wptr + 5; - - control |= INDIRECT_BUFFER_VALID; - next_rptr += 4; - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, next_rptr); - - header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); + u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24); - control |= ib->length_dw | (vm_id << 24); - - amdgpu_ring_write(ring, header); + amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); amdgpu_ring_write(ring, #ifdef __BIG_ENDIAN (2 << 0) | @@ -2123,26 +2105,25 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, * Provides a basic gfx ring test to verify that IBs are working. * Returns 0 on success, error on failure. 
*/ -static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) +static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; - unsigned i; - int r; + long r; r = amdgpu_gfx_scratch_get(adev, &scratch); if (r) { - DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r); + DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r); return r; } WREG32(scratch, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err1; } ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); @@ -2154,21 +2135,19 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) if (r) goto err2; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out\n"); + r = -ETIMEDOUT; goto err2; - } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(scratch); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err2; + } + tmp = RREG32(scratch); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); @@ -2176,7 +2155,6 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) } err2: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err1: @@ -3221,7 +3199,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) } } adev->gfx.rlc.cs_data = ci_cs_data; - adev->gfx.rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4; + adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */ + adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */ src_ptr = adev->gfx.rlc.reg_list; dws = adev->gfx.rlc.reg_list_size; @@ -3379,7 +3358,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v7_0_select_se_sh(adev, i, j); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); for (k = 0; k < adev->usec_timeout; k++) { if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -3387,7 +3366,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -3434,7 +3413,7 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev) return orig; } -void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) +static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) { u32 tmp, i, mask; @@ -3456,7 +3435,7 @@ void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) } } -void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev) +static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev) { u32 tmp; @@ -3471,7 +3450,7 @@ void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev) * * Halt the RLC ME (MicroEngine) (CIK). 
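The converted IB tests take a timeout in jiffies and use fence_wait_timeout() instead of busy-polling the scratch register. The return convention is: 0 means the fence never signalled (reported as -ETIMEDOUT), negative means the wait itself failed, positive means the fence signalled and the scratch value can be checked. A condensed sketch of that pattern (illustrative, not part of the patch):

	long r;

	r = fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		/* fence never signalled within the timeout */
		r = -ETIMEDOUT;
	} else if (r < 0) {
		/* the wait itself failed */
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		/* signalled: verify the GPU actually wrote the magic value */
		r = (RREG32(scratch) == 0xDEADBEEF) ? 0 : -EINVAL;
	}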
*/ -void gfx_v7_0_rlc_stop(struct amdgpu_device *adev) +static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev) { WREG32(mmRLC_CNTL, 0); @@ -3547,7 +3526,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) WREG32(mmRLC_LB_CNTR_MAX, 0x00008000); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff); WREG32(mmRLC_LB_PARAMS, 0x00600408); WREG32(mmRLC_LB_CNTL, 0x80000004); @@ -3587,7 +3566,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | @@ -3638,7 +3617,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | @@ -3689,7 +3668,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK; @@ -3867,6 +3846,20 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev, } } +static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, + u32 bitmap) +{ + u32 data; + + if (!bitmap) + return; + + data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; + data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; + + WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data); +} + static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; @@ -4123,7 +4116,7 @@ static void gfx_v7_0_fini_pg(struct amdgpu_device *adev) * Fetches a GPU clock counter snapshot (SI). * Returns the 64 bit clock counter snapshot. 
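Since the RLC safe-mode helpers become static here, other IP blocks reach them through the rlc funcs table that gfx_v7_0_early_init() installs below; the kv_dpm.c hunk later in this patch uses exactly this path. Sketch of the call pattern:

	adev->gfx.rlc.funcs->enter_safe_mode(adev);
	/* ... program DIDT / clock- and power-gating registers while the RLC is quiesced ... */
	adev->gfx.rlc.funcs->exit_safe_mode(adev);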
*/ -uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev) +static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock; @@ -4183,12 +4176,24 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring, amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } +static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = { + .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, + .select_se_sh = &gfx_v7_0_select_se_sh, +}; + +static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { + .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode, + .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode +}; + static int gfx_v7_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS; adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS; + adev->gfx.funcs = &gfx_v7_0_gfx_funcs; + adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs; gfx_v7_0_set_ring_funcs(adev); gfx_v7_0_set_irq_funcs(adev); gfx_v7_0_set_gds_init(adev); @@ -5032,16 +5037,22 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev) int i, j, k, counter, active_cu_number = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; + unsigned disable_masks[4 * 2]; memset(cu_info, 0, sizeof(*cu_info)); + amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2); + mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { mask = 1; ao_bitmap = 0; counter = 0; - gfx_v7_0_select_se_sh(adev, i, j); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); + if (i < 4 && j < 2) + gfx_v7_0_set_user_cu_inactive_bitmap( + adev, disable_masks[i * 2 + j]); bitmap = gfx_v7_0_get_cu_active_bitmap(adev); cu_info->bitmap[i][j] = bitmap; @@ -5057,7 +5068,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev) ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h index e747aa9..94e3ea1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h @@ -26,11 +26,4 @@ extern const struct amd_ip_funcs gfx_v7_0_ip_funcs; -/* XXX these shouldn't be exported */ -void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev); -void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev); -void gfx_v7_0_rlc_stop(struct amdgpu_device *adev); -uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev); -void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index c2ef945..bff8668 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -787,26 +787,25 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) return r; } -static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) +static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; - unsigned i; - int r; + long r; r = amdgpu_gfx_scratch_get(adev, &scratch); if (r) { - DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r); + 
DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r); return r; } WREG32(scratch, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err1; } ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); @@ -818,28 +817,25 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) if (r) goto err2; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out.\n"); + r = -ETIMEDOUT; goto err2; - } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(scratch); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err2; + } + tmp = RREG32(scratch); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); r = -EINVAL; } err2: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err1: @@ -1160,6 +1156,71 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, buffer[count++] = cpu_to_le32(0); } +static void cz_init_cp_jump_table(struct amdgpu_device *adev) +{ + const __le32 *fw_data; + volatile u32 *dst_ptr; + int me, i, max_me = 4; + u32 bo_offset = 0; + u32 table_offset, table_size; + + if (adev->asic_type == CHIP_CARRIZO) + max_me = 5; + + /* write the cp table buffer */ + dst_ptr = adev->gfx.rlc.cp_table_ptr; + for (me = 0; me < max_me; me++) { + if (me == 0) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; + fw_data = (const __le32 *) + (adev->gfx.ce_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 1) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; + fw_data = (const __le32 *) + (adev->gfx.pfp_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 2) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; + fw_data = (const __le32 *) + (adev->gfx.me_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 3) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; + fw_data = (const __le32 *) + (adev->gfx.mec_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 4) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; + fw_data = (const __le32 *) + (adev->gfx.mec2_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } + + for (i = 0; i < table_size; i ++) { + dst_ptr[bo_offset + i] = + 
cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); + } + + bo_offset += table_size; + } +} + static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) { int r; @@ -1175,6 +1236,18 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); adev->gfx.rlc.clear_state_obj = NULL; } + + /* jump table block */ + if (adev->gfx.rlc.cp_table_obj) { + r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); + if (unlikely(r != 0)) + dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); + amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); + + amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); + adev->gfx.rlc.cp_table_obj = NULL; + } } static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) @@ -1231,6 +1304,46 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); } + if ((adev->asic_type == CHIP_CARRIZO) || + (adev->asic_type == CHIP_STONEY)) { + adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ + if (adev->gfx.rlc.cp_table_obj == NULL) { + r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, + &adev->gfx.rlc.cp_table_obj); + if (r) { + dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); + return r; + } + } + + r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); + if (unlikely(r != 0)) { + dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); + return r; + } + r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.cp_table_gpu_addr); + if (r) { + amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); + dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r); + return r; + } + r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr); + if (r) { + dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r); + return r; + } + + cz_init_cp_jump_table(adev); + + amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); + + } + return 0; } @@ -1612,7 +1725,6 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) RREG32(sec_ded_counter_registers[i]); fail: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); @@ -3339,9 +3451,15 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) } } -void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) +static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, + u32 se_num, u32 sh_num, u32 instance) { - u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); + u32 data; + + if (instance == 0xffffffff) + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) { data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); @@ -3391,13 +3509,13 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v8_0_select_se_sh(adev, i, j); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); data = gfx_v8_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 
0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; @@ -3501,7 +3619,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) * making sure that the following register writes will be broadcasted * to all the shaders */ - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmPA_SC_FIFO_SIZE, (adev->gfx.config.sc_prim_fifo_size_frontend << @@ -3524,7 +3642,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v8_0_select_se_sh(adev, i, j); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); for (k = 0; k < adev->usec_timeout; k++) { if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -3532,7 +3650,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -3693,13 +3811,13 @@ static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev) WREG32(mmRLC_SRM_CNTL, data); } -static void polaris11_init_power_gating(struct amdgpu_device *adev) +static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev) { uint32_t data; if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | - AMD_PG_SUPPORT_GFX_SMG | - AMD_PG_SUPPORT_GFX_DMG)) { + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG)) { data = RREG32(mmCP_RB_WPTR_POLL_CNTL); data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK; data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); @@ -3724,6 +3842,53 @@ static void polaris11_init_power_gating(struct amdgpu_device *adev) } } +static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK; + else + data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + static void gfx_v8_0_init_pg(struct amdgpu_device *adev) { if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | @@ -3736,8 +3901,25 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev) gfx_v8_0_init_save_restore_list(adev); gfx_v8_0_enable_save_restore_machine(adev); - if (adev->asic_type == CHIP_POLARIS11) - polaris11_init_power_gating(adev); + if ((adev->asic_type == CHIP_CARRIZO) || + (adev->asic_type == CHIP_STONEY)) { + WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8); + gfx_v8_0_init_power_gating(adev); + 
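The cz_enable_*() helpers added above all share the same read-modify-write shape on RLC_PG_CNTL and only write the register back when a bit actually changed. Generic form, with SOME_FEATURE_ENABLE_MASK standing in for the per-feature RLC_PG_CNTL field mask (placeholder, not a real register field):

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable)
		data |= SOME_FEATURE_ENABLE_MASK;
	else
		data &= ~SOME_FEATURE_ENABLE_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);	/* skip the write when nothing changed */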
WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask); + if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) { + cz_enable_sck_slow_down_on_power_up(adev, true); + cz_enable_sck_slow_down_on_power_down(adev, true); + } else { + cz_enable_sck_slow_down_on_power_up(adev, false); + cz_enable_sck_slow_down_on_power_down(adev, false); + } + if (adev->pg_flags & AMD_PG_SUPPORT_CP) + cz_enable_cp_power_gating(adev, true); + else + cz_enable_cp_power_gating(adev, false); + } else if (adev->asic_type == CHIP_POLARIS11) { + gfx_v8_0_init_power_gating(adev); + } } } @@ -4976,7 +5158,7 @@ static int gfx_v8_0_soft_reset(void *handle) * Fetches a GPU clock counter snapshot. * Returns the 64 bit clock counter snapshot. */ -uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev) +static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock; @@ -5036,12 +5218,18 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring, amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } +static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = { + .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, + .select_se_sh = &gfx_v8_0_select_se_sh, +}; + static int gfx_v8_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS; adev->gfx.num_compute_rings = GFX8_NUM_COMPUTE_RINGS; + adev->gfx.funcs = &gfx_v8_0_gfx_funcs; gfx_v8_0_set_ring_funcs(adev); gfx_v8_0_set_irq_funcs(adev); gfx_v8_0_set_gds_init(adev); @@ -5074,51 +5262,43 @@ static int gfx_v8_0_late_init(void *handle) return 0; } -static void polaris11_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, - bool enable) +static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, + bool enable) { uint32_t data, temp; - /* Send msg to SMU via Powerplay */ - amdgpu_set_powergating_state(adev, - AMD_IP_BLOCK_TYPE_SMC, - enable ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE); + if (adev->asic_type == CHIP_POLARIS11) + /* Send msg to SMU via Powerplay */ + amdgpu_set_powergating_state(adev, + AMD_IP_BLOCK_TYPE_SMC, + enable ? 
+ AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE); - if (enable) { - /* Enable static MGPG */ - temp = data = RREG32(mmRLC_PG_CNTL); + temp = data = RREG32(mmRLC_PG_CNTL); + /* Enable static MGPG */ + if (enable) data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; - - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); - } else { - temp = data = RREG32(mmRLC_PG_CNTL); + else data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); - } + if (temp != data) + WREG32(mmRLC_PG_CNTL, data); } -static void polaris11_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev, - bool enable) +static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev, + bool enable) { uint32_t data, temp; - if (enable) { - /* Enable dynamic MGPG */ - temp = data = RREG32(mmRLC_PG_CNTL); + temp = data = RREG32(mmRLC_PG_CNTL); + /* Enable dynamic MGPG */ + if (enable) data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; - - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); - } else { - temp = data = RREG32(mmRLC_PG_CNTL); + else data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); - } + if (temp != data) + WREG32(mmRLC_PG_CNTL, data); } static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev, @@ -5126,19 +5306,63 @@ static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *ade { uint32_t data, temp; - if (enable) { - /* Enable quick PG */ - temp = data = RREG32(mmRLC_PG_CNTL); - data |= 0x100000; + temp = data = RREG32(mmRLC_PG_CNTL); + /* Enable quick PG */ + if (enable) + data |= RLC_PG_CNTL__QUICK_PG_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__QUICK_PG_ENABLE_MASK; - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); - } else { - temp = data = RREG32(mmRLC_PG_CNTL); - data &= ~0x100000; + if (temp != data) + WREG32(mmRLC_PG_CNTL, data); +} - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); +static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); + + /* Read any GFX register to wake up GFX. */ + if (!enable) + data = RREG32(mmDB_RENDER_CONTROL); +} + +static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev, + bool enable) +{ + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) { + cz_enable_gfx_cg_power_gating(adev, true); + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE) + cz_enable_gfx_pipeline_power_gating(adev, true); + } else { + cz_enable_gfx_cg_power_gating(adev, false); + cz_enable_gfx_pipeline_power_gating(adev, false); } } @@ -5146,21 +5370,42 @@ static int gfx_v8_0_set_powergating_state(void *handle, enum amd_powergating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_PG_STATE_GATE) ? 
true : false; if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) return 0; switch (adev->asic_type) { + case CHIP_CARRIZO: + case CHIP_STONEY: + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) + cz_update_gfx_cg_power_gating(adev, enable); + + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable) + gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true); + else + gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false); + + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable) + gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true); + else + gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false); + break; case CHIP_POLARIS11: - if (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) - polaris11_enable_gfx_static_mg_power_gating(adev, - state == AMD_PG_STATE_GATE ? true : false); - else if (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) - polaris11_enable_gfx_dynamic_mg_power_gating(adev, - state == AMD_PG_STATE_GATE ? true : false); + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable) + gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true); + else + gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false); + + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable) + gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true); + else + gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false); + + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable) + polaris11_enable_gfx_quick_mg_power_gating(adev, true); else - polaris11_enable_gfx_quick_mg_power_gating(adev, - state == AMD_PG_STATE_GATE ? true : false); + polaris11_enable_gfx_quick_mg_power_gating(adev, false); break; default: break; @@ -5174,7 +5419,7 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, { uint32_t data; - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); @@ -5562,6 +5807,8 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev WREG32(mmRLC_CGCG_CGLS_CTRL, data); } + gfx_v8_0_wait_for_rlc_serdes(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); } static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev, @@ -5687,17 +5934,6 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, unsigned vm_id, bool ctx_switch) { u32 header, control = 0; - u32 next_rptr = ring->wptr + 5; - - if (ctx_switch) - next_rptr += 2; - - next_rptr += 4; - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, next_rptr); /* insert SWITCH_BUFFER packet before first IB in the ring frame */ if (ctx_switch) { @@ -5726,23 +5962,9 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, struct amdgpu_ib *ib, unsigned vm_id, bool ctx_switch) { - u32 header, control = 0; - u32 next_rptr = ring->wptr + 5; - - control |= INDIRECT_BUFFER_VALID; - - next_rptr += 4; - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, next_rptr); + u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24); - header = 
PACKET3(PACKET3_INDIRECT_BUFFER, 2); - - control |= ib->length_dw | (vm_id << 24); - - amdgpu_ring_write(ring, header); + amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); amdgpu_ring_write(ring, #ifdef __BIG_ENDIAN (2 << 0) | @@ -6195,9 +6417,9 @@ static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_TOPAZ: - case CHIP_STONEY: adev->gfx.rlc.funcs = &iceland_rlc_funcs; break; + case CHIP_STONEY: case CHIP_CARRIZO: adev->gfx.rlc.funcs = &cz_rlc_funcs; break; @@ -6235,6 +6457,20 @@ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev) } } +static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, + u32 bitmap) +{ + u32 data; + + if (!bitmap) + return; + + data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; + data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; + + WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data); +} + static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; @@ -6255,16 +6491,22 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) int i, j, k, counter, active_cu_number = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; + unsigned disable_masks[4 * 2]; memset(cu_info, 0, sizeof(*cu_info)); + amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2); + mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { mask = 1; ao_bitmap = 0; counter = 0; - gfx_v8_0_select_se_sh(adev, i, j); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); + if (i < 4 && j < 2) + gfx_v8_0_set_user_cu_inactive_bitmap( + adev, disable_masks[i * 2 + j]); bitmap = gfx_v8_0_get_cu_active_bitmap(adev); cu_info->bitmap[i][j] = bitmap; @@ -6280,7 +6522,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h index 16a49f5..bc82c79 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h @@ -26,7 +26,6 @@ extern const struct amd_ip_funcs gfx_v8_0_ip_funcs; -uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev); void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 1feb643..d24a82b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -39,6 +39,7 @@ static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); +static int gmc_v7_0_wait_for_idle(void *handle); MODULE_FIRMWARE("radeon/bonaire_mc.bin"); MODULE_FIRMWARE("radeon/hawaii_mc.bin"); @@ -73,39 +74,15 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev) } } -/** - * gmc7_mc_wait_for_idle - wait for MC idle callback. - * - * @adev: amdgpu_device pointer - * - * Wait for the MC (memory controller) to be idle. - * (evergreen+). - * Returns 0 if the MC is idle, -1 if not. 
- */ -int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev) -{ - unsigned i; - u32 tmp; - - for (i = 0; i < adev->usec_timeout; i++) { - /* read MC_STATUS */ - tmp = RREG32(mmSRBM_STATUS) & 0x1F00; - if (!tmp) - return 0; - udelay(1); - } - return -1; -} - -void gmc_v7_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v7_0_mc_stop(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) { u32 blackout; if (adev->mode_info.num_crtc) amdgpu_display_stop_mc_access(adev, save); - amdgpu_asic_wait_for_mc_idle(adev); + gmc_v7_0_wait_for_idle((void *)adev); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { @@ -120,8 +97,8 @@ void gmc_v7_0_mc_stop(struct amdgpu_device *adev, udelay(100); } -void gmc_v7_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v7_0_mc_resume(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) { u32 tmp; @@ -311,7 +288,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) amdgpu_display_set_vga_render_state(adev, false); gmc_v7_0_mc_stop(adev, &save); - if (amdgpu_asic_wait_for_mc_idle(adev)) { + if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } /* Update configuration */ @@ -331,7 +308,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); - if (amdgpu_asic_wait_for_mc_idle(adev)) { + if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } gmc_v7_0_mc_resume(adev, &save); @@ -1137,7 +1114,7 @@ static int gmc_v7_0_soft_reset(void *handle) if (srbm_soft_reset) { gmc_v7_0_mc_stop(adev, &save); - if (gmc_v7_0_wait_for_idle(adev)) { + if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h index 36fcbbc..0b386b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h @@ -26,11 +26,4 @@ extern const struct amd_ip_funcs gmc_v7_0_ip_funcs; -/* XXX these shouldn't be exported */ -void gmc_v7_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); -void gmc_v7_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); -int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 9945d5b..717359d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -41,6 +41,7 @@ static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev); static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); +static int gmc_v8_0_wait_for_idle(void *handle); MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_mc.bin"); @@ -147,44 +148,15 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) } } -/** - * gmc8_mc_wait_for_idle - wait for MC idle callback. - * - * @adev: amdgpu_device pointer - * - * Wait for the MC (memory controller) to be idle. - * (evergreen+). - * Returns 0 if the MC is idle, -1 if not. 
- */ -int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev) -{ - unsigned i; - u32 tmp; - - for (i = 0; i < adev->usec_timeout; i++) { - /* read MC_STATUS */ - tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK | - SRBM_STATUS__MCB_BUSY_MASK | - SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | - SRBM_STATUS__MCC_BUSY_MASK | - SRBM_STATUS__MCD_BUSY_MASK | - SRBM_STATUS__VMC1_BUSY_MASK); - if (!tmp) - return 0; - udelay(1); - } - return -1; -} - -void gmc_v8_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v8_0_mc_stop(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) { u32 blackout; if (adev->mode_info.num_crtc) amdgpu_display_stop_mc_access(adev, save); - amdgpu_asic_wait_for_mc_idle(adev); + gmc_v8_0_wait_for_idle(adev); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { @@ -199,8 +171,8 @@ void gmc_v8_0_mc_stop(struct amdgpu_device *adev, udelay(100); } -void gmc_v8_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v8_0_mc_resume(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) { u32 tmp; @@ -393,7 +365,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) amdgpu_display_set_vga_render_state(adev, false); gmc_v8_0_mc_stop(adev, &save); - if (amdgpu_asic_wait_for_mc_idle(adev)) { + if (gmc_v8_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } /* Update configuration */ @@ -413,7 +385,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); - if (amdgpu_asic_wait_for_mc_idle(adev)) { + if (gmc_v8_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } gmc_v8_0_mc_resume(adev, &save); @@ -1140,7 +1112,7 @@ static int gmc_v8_0_soft_reset(void *handle) if (srbm_soft_reset) { gmc_v8_0_mc_stop(adev, &save); - if (gmc_v8_0_wait_for_idle(adev)) { + if (gmc_v8_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h index 9734360..fc5001a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h @@ -26,11 +26,4 @@ extern const struct amd_ip_funcs gmc_v8_0_ip_funcs; -/* XXX these shouldn't be exported */ -void gmc_v8_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); -void gmc_v8_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); -int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c index 825ccd6..2f078ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c @@ -24,7 +24,7 @@ #include <linux/firmware.h> #include "drmP.h" #include "amdgpu.h" -#include "iceland_smumgr.h" +#include "iceland_smum.h" MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c index 52ee081..2118399 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c @@ -25,7 +25,7 @@ #include "drmP.h" #include "amdgpu.h" #include "ppsmc.h" -#include "iceland_smumgr.h" +#include "iceland_smum.h" #include "smu_ucode_xfer_vi.h" #include "amdgpu_ucode.h" @@ -211,7 +211,7 @@ 
static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev, PPSMC_Msg msg) { if (!iceland_is_smc_ram_running(adev)) - return -EINVAL;; + return -EINVAL; if (wait_smu_response(adev)) { DRM_ERROR("Failed to send previous message\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h index 1e0769e..5983e31 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h @@ -21,8 +21,8 @@ * */ -#ifndef ICELAND_SMUMGR_H -#define ICELAND_SMUMGR_H +#ifndef ICELAND_SMUM_H +#define ICELAND_SMUM_H #include "ppsmc.h" diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index a789a86..a845e88 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -191,6 +191,7 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, vid_mapping_table->num_entries = i; } +#if 0 static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = { { 0, 4, 1 }, @@ -289,6 +290,7 @@ static const struct kv_lcac_config_reg cpl_cac_config_reg[] = { { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } }; +#endif static const struct kv_pt_config_reg didt_config_kv[] = { @@ -507,19 +509,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable) pi->caps_db_ramping || pi->caps_td_ramping || pi->caps_tcp_ramping) { - gfx_v7_0_enter_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->enter_safe_mode(adev); if (enable) { ret = kv_program_pt_config_registers(adev, didt_config_kv); if (ret) { - gfx_v7_0_exit_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); return ret; } } kv_do_enable_didt(adev, enable); - gfx_v7_0_exit_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/ppsmc.h b/drivers/gpu/drm/amd/amdgpu/ppsmc.h index 7837f2e..8463245 100644 --- a/drivers/gpu/drm/amd/amdgpu/ppsmc.h +++ b/drivers/gpu/drm/amd/amdgpu/ppsmc.h @@ -90,7 +90,9 @@ typedef uint8_t PPSMC_Result; #define PPSMC_StartFanControl ((uint8_t)0x5B) #define PPSMC_StopFanControl ((uint8_t)0x5C) #define PPSMC_MSG_NoDisplay ((uint8_t)0x5D) +#define PPSMC_NoDisplay ((uint8_t)0x5D) #define PPSMC_MSG_HasDisplay ((uint8_t)0x5E) +#define PPSMC_HasDisplay ((uint8_t)0x5E) #define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60) #define PPSMC_MSG_UVDPowerON ((uint8_t)0x61) #define PPSMC_MSG_EnableULV ((uint8_t)0x62) @@ -108,6 +110,7 @@ typedef uint8_t PPSMC_Result; #define PPSMC_MSG_DisableDTE ((uint8_t)0x88) #define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96) #define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97) +#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149) /* CI/KV/KB */ #define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D) @@ -161,6 +164,7 @@ typedef uint8_t PPSMC_Result; #define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190) #define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191) #define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A) +#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205) #define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C) #define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index b556bd0..1351c7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -255,19 +255,6 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, unsigned vm_id, bool ctx_switch) { u32 vmid = vm_id & 0xf; - u32 
next_rptr = ring->wptr + 5; - - while ((next_rptr & 7) != 2) - next_rptr++; - - next_rptr += 6; - - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); - amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); - amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); - amdgpu_ring_write(ring, next_rptr); /* IB packet must end on a 8 DW boundary */ sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8); @@ -406,7 +393,7 @@ static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable) u32 f32_cntl; int i; - if (enable == false) { + if (!enable) { sdma_v2_4_gfx_stop(adev); sdma_v2_4_rlc_stop(adev); } @@ -580,19 +567,21 @@ static int sdma_v2_4_start(struct amdgpu_device *adev) { int r; - if (!adev->firmware.smu_load) { - r = sdma_v2_4_load_microcode(adev); - if (r) - return r; - } else { - r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, - AMDGPU_UCODE_ID_SDMA0); - if (r) - return -EINVAL; - r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, - AMDGPU_UCODE_ID_SDMA1); - if (r) - return -EINVAL; + if (!adev->pp_enabled) { + if (!adev->firmware.smu_load) { + r = sdma_v2_4_load_microcode(adev); + if (r) + return r; + } else { + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_SDMA0); + if (r) + return -EINVAL; + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_SDMA1); + if (r) + return -EINVAL; + } } /* halt the engine before programing */ @@ -679,20 +668,19 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) * Test a simple IB in the DMA ring (VI). * Returns 0 on success, error on failure. */ -static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) +static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; - unsigned i; unsigned index; - int r; u32 tmp = 0; u64 gpu_addr; + long r; r = amdgpu_wb_get(adev, &index); if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); return r; } @@ -702,7 +690,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err0; } @@ -721,28 +709,25 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) if (r) goto err1; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out\n"); + r = -ETIMEDOUT; goto err1; - } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); + } else if (r) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err1; + } + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; } err1: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err0: diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 
index 532ea88..653ce5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -415,18 +415,6 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, unsigned vm_id, bool ctx_switch) { u32 vmid = vm_id & 0xf; - u32 next_rptr = ring->wptr + 5; - - while ((next_rptr & 7) != 2) - next_rptr++; - next_rptr += 6; - - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); - amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); - amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); - amdgpu_ring_write(ring, next_rptr); /* IB packet must end on a 8 DW boundary */ sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8); @@ -616,7 +604,7 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable) u32 f32_cntl; int i; - if (enable == false) { + if (!enable) { sdma_v3_0_gfx_stop(adev); sdma_v3_0_rlc_stop(adev); } @@ -908,20 +896,19 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) * Test a simple IB in the DMA ring (VI). * Returns 0 on success, error on failure. */ -static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) +static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; - unsigned i; unsigned index; - int r; u32 tmp = 0; u64 gpu_addr; + long r; r = amdgpu_wb_get(adev, &index); if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); return r; } @@ -931,7 +918,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err0; } @@ -950,27 +937,24 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) if (r) goto err1; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out\n"); + r = -ETIMEDOUT; goto err1; - } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err1; + } + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; } err1: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err0: diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c index 083893d..940de18 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c @@ -173,7 +173,7 @@ static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) { if (!tonga_is_smc_ram_running(adev)) { - return -EINVAL;; + return -EINVAL; } if (wait_smu_response(adev)) { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index f075514..132e613 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ 
-34,6 +34,8 @@ #include "oss/oss_2_0_d.h" #include "oss/oss_2_0_sh_mask.h" +#include "bif/bif_4_1_d.h" + static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); static void uvd_v4_2_init_cg(struct amdgpu_device *adev); static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); @@ -439,6 +441,32 @@ static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq } /** + * uvd_v4_2_ring_emit_hdp_flush - emit an hdp flush + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp flush. + */ +static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0)); + amdgpu_ring_write(ring, 0); +} + +/** + * uvd_v4_2_ring_hdp_invalidate - emit an hdp invalidate + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp invalidate. + */ +static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0)); + amdgpu_ring_write(ring, 1); +} + +/** * uvd_v4_2_ring_test_ring - register write test * * @ring: amdgpu_ring pointer @@ -499,49 +527,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, } /** - * uvd_v4_2_ring_test_ib - test ib execution - * - * @ring: amdgpu_ring pointer - * - * Test if we can successfully execute an IB - */ -static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - struct fence *fence = NULL; - int r; - - r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); - if (r) { - DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r); - return r; - } - - r = amdgpu_uvd_get_create_msg(ring, 1, NULL); - if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); - goto error; - } - - r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); - if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); - goto error; - } - - r = fence_wait(fence, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - goto error; - } - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); -error: - fence_put(fence); - amdgpu_asic_set_uvd_clocks(adev, 0, 0); - return r; -} - -/** * uvd_v4_2_mc_resume - memory controller programming * * @adev: amdgpu_device pointer @@ -763,10 +748,14 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_ib = uvd_v4_2_ring_emit_ib, .emit_fence = uvd_v4_2_ring_emit_fence, + .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush, + .emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate, .test_ring = uvd_v4_2_ring_test_ring, - .test_ib = uvd_v4_2_ring_test_ib, + .test_ib = amdgpu_uvd_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_uvd_ring_begin_use, + .end_use = amdgpu_uvd_ring_end_use, }; static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index e0a76a8..101de136 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -31,6 +31,7 @@ #include "uvd/uvd_5_0_sh_mask.h" #include "oss/oss_2_0_d.h" #include "oss/oss_2_0_sh_mask.h" +#include "bif/bif_5_0_d.h" #include "vi.h" static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev); @@ -489,6 +490,32 @@ static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq } /** + * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp flush. 
+ */ +static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0)); + amdgpu_ring_write(ring, 0); +} + +/** + * uvd_v5_0_ring_hdp_invalidate - emit an hdp invalidate + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp invalidate. + */ +static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0)); + amdgpu_ring_write(ring, 1); +} + +/** * uvd_v5_0_ring_test_ring - register write test * * @ring: amdgpu_ring pointer @@ -550,49 +577,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, ib->length_dw); } -/** - * uvd_v5_0_ring_test_ib - test ib execution - * - * @ring: amdgpu_ring pointer - * - * Test if we can successfully execute an IB - */ -static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - struct fence *fence = NULL; - int r; - - r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); - if (r) { - DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r); - return r; - } - - r = amdgpu_uvd_get_create_msg(ring, 1, NULL); - if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); - goto error; - } - - r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); - if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); - goto error; - } - - r = fence_wait(fence, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - goto error; - } - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); -error: - fence_put(fence); - amdgpu_asic_set_uvd_clocks(adev, 0, 0); - return r; -} - static bool uvd_v5_0_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -815,10 +799,14 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_ib = uvd_v5_0_ring_emit_ib, .emit_fence = uvd_v5_0_ring_emit_fence, + .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush, + .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate, .test_ring = uvd_v5_0_ring_test_ring, - .test_ib = uvd_v5_0_ring_test_ib, + .test_ib = amdgpu_uvd_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_uvd_ring_begin_use, + .end_use = amdgpu_uvd_ring_end_use, }; static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index c9929d6..7f21102 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -33,6 +33,8 @@ #include "oss/oss_2_0_sh_mask.h" #include "smu/smu_7_1_3_d.h" #include "smu/smu_7_1_3_sh_mask.h" +#include "bif/bif_5_1_d.h" +#include "gmc/gmc_8_1_d.h" #include "vi.h" static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev); @@ -385,8 +387,8 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) uint32_t mp_swap_cntl; int i, j, r; - /*disable DPG */ - WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2)); + /* disable DPG */ + WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); /* disable byte swapping */ lmi_swap_cntl = 0; @@ -405,17 +407,21 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) } /* disable interupt */ - WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); + WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK); /* stall UMC and register bus before resetting VCPU */ - WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); + WREG32_P(mmUVD_LMI_CTRL2, 
UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); mdelay(1); /* put LMI, VCPU, RBC etc... into reset */ - WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | - UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | - UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | + WREG32(mmUVD_SOFT_RESET, + UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | + UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | + UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | + UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | + UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | + UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); mdelay(5); @@ -424,8 +430,13 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) mdelay(5); /* initialize UVD memory controller */ - WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | - (1 << 21) | (1 << 9) | (1 << 20)); + WREG32(mmUVD_LMI_CTRL, + (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK); #ifdef __BIG_ENDIAN /* swap (8 in 32) RB and IB */ @@ -447,10 +458,10 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) mdelay(5); /* enable VCPU clock */ - WREG32(mmUVD_VCPU_CNTL, 1 << 9); + WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK); /* enable UMC */ - WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); + WREG32_P(mmUVD_LMI_CTRL2, 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); /* boot up the VCPU */ WREG32(mmUVD_SOFT_RESET, 0); @@ -484,10 +495,12 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) return r; } /* enable master interrupt */ - WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1)); + WREG32_P(mmUVD_MASTINT_EN, + (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), + ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); /* clear the bit 4 of UVD_STATUS */ - WREG32_P(mmUVD_STATUS, 0, ~(2 << 1)); + WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); rb_bufsz = order_base_2(ring->ring_size); tmp = 0; @@ -581,6 +594,32 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq } /** + * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp flush. + */ +static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0)); + amdgpu_ring_write(ring, 0); +} + +/** + * uvd_v6_0_ring_hdp_invalidate - emit an hdp invalidate + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp invalidate. 
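
Nearly every register touch in the uvd_v6_0_start() hunk above goes through WREG32_P(reg, val, mask), a read-modify-write in which mask names the bits to preserve. A minimal user-space model of that pattern, assuming the usual amdgpu semantics (tmp &= mask; tmp |= val & ~mask); the backing array, register index, and bit position are illustrative only.

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[256];	/* stand-in for MMIO space */

static void wreg32_p(uint32_t reg, uint32_t val, uint32_t mask)
{
	uint32_t tmp = regs[reg];

	tmp &= mask;		/* keep the bits the caller did not claim */
	tmp |= val & ~mask;	/* update only the claimed field */
	regs[reg] = tmp;
}

int main(void)
{
	const uint32_t stall_arb_umc = 1u << 8;	/* assumed bit position of STALL_ARB_UMC */

	regs[0x10] = 0x00c0fe00;
	/* equivalent of WREG32_P(reg, STALL_ARB_UMC_MASK, ~STALL_ARB_UMC_MASK) */
	wreg32_p(0x10, stall_arb_umc, ~stall_arb_umc);
	printf("0x%08x\n", regs[0x10]);	/* bit 8 now set, all other bits preserved */
	return 0;
}
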
+ */ +static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0)); + amdgpu_ring_write(ring, 1); +} + +/** * uvd_v6_0_ring_test_ring - register write test * * @ring: amdgpu_ring pointer @@ -634,6 +673,9 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, unsigned vm_id, bool ctx_switch) { + amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0)); + amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0)); @@ -642,39 +684,55 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, ib->length_dw); } -/** - * uvd_v6_0_ring_test_ib - test ib execution - * - * @ring: amdgpu_ring pointer - * - * Test if we can successfully execute an IB - */ -static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring) +static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vm_id, uint64_t pd_addr) { - struct fence *fence = NULL; - int r; + uint32_t reg; - r = amdgpu_uvd_get_create_msg(ring, 1, NULL); - if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); - goto error; - } + if (vm_id < 8) + reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id; + else + reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8; - r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); - if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); - goto error; - } + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); + amdgpu_ring_write(ring, reg << 2); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); + amdgpu_ring_write(ring, pd_addr >> 12); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); + amdgpu_ring_write(ring, 0x8); - r = fence_wait(fence, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - goto error; - } - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); -error: - fence_put(fence); - return r; + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); + amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); + amdgpu_ring_write(ring, 0x8); + + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0)); + amdgpu_ring_write(ring, 1 << vm_id); /* mask */ + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); + amdgpu_ring_write(ring, 0xC); +} + +static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) +{ + uint32_t seq = ring->fence_drv.sync_seq; + uint64_t addr = ring->fence_drv.gpu_addr; + + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0)); + amdgpu_ring_write(ring, 0xffffffff); /* mask */ + amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0)); + amdgpu_ring_write(ring, seq); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); + amdgpu_ring_write(ring, 0xE); } static bool uvd_v6_0_is_idle(void *handle) @@ 
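
uvd_v6_0_ring_emit_vm_flush() above does not write GMC registers directly; it routes each write through the UVD VCPU mailbox: DATA0 carries the register byte address, DATA1 the value, and a CMD packet triggers the operation (0x8 for a plain write; the 0xC and 0xE commands used for the masked poll and the pipeline-sync wait are inferred from usage in this hunk, not from documentation). A compact model of the write sequence, with made-up register offsets:

#include <stdint.h>
#include <stdio.h>

#define PACKET0(reg, n)	((((uint32_t)(n)) & 0x3fff) << 16 | ((reg) & 0xffff))

/* illustrative dword offsets, not the real UVD 6.0 ones */
enum {
	mmUVD_GPCOM_VCPU_DATA0 = 0x3bc0,
	mmUVD_GPCOM_VCPU_DATA1 = 0x3bc1,
	mmUVD_GPCOM_VCPU_CMD   = 0x3bc2,
};

static uint32_t ring[16];
static unsigned int wptr;

static void ring_write(uint32_t v)
{
	ring[wptr++] = v;
}

/* ask the UVD firmware to write 'val' to register 'reg' on our behalf */
static void emit_vcpu_reg_write(uint32_t reg, uint32_t val)
{
	ring_write(PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	ring_write(reg << 2);		/* register byte address */
	ring_write(PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	ring_write(val);
	ring_write(PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	ring_write(0x8);		/* command code observed for a plain write */
}

int main(void)
{
	unsigned int i;

	emit_vcpu_reg_write(0x0500 /* pretend page-table base register */, 0x00123456);
	for (i = 0; i < wptr; i++)
		printf("%2u: 0x%08x\n", i, ring[i]);
	return 0;
}
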
-847,7 +905,8 @@ static int uvd_v6_0_set_clockgating_state(void *handle, bool enable = (state == AMD_CG_STATE_GATE) ? true : false; static int curstate = -1; - if (adev->asic_type == CHIP_FIJI) + if (adev->asic_type == CHIP_FIJI || + adev->asic_type == CHIP_POLARIS10) uvd_v6_set_bypass_mode(adev, enable); if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) @@ -912,22 +971,51 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = { .set_powergating_state = uvd_v6_0_set_powergating_state, }; -static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = { +static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { .get_rptr = uvd_v6_0_ring_get_rptr, .get_wptr = uvd_v6_0_ring_get_wptr, .set_wptr = uvd_v6_0_ring_set_wptr, .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_ib = uvd_v6_0_ring_emit_ib, .emit_fence = uvd_v6_0_ring_emit_fence, + .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, + .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate, + .test_ring = uvd_v6_0_ring_test_ring, + .test_ib = amdgpu_uvd_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_uvd_ring_begin_use, + .end_use = amdgpu_uvd_ring_end_use, +}; + +static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = { + .get_rptr = uvd_v6_0_ring_get_rptr, + .get_wptr = uvd_v6_0_ring_get_wptr, + .set_wptr = uvd_v6_0_ring_set_wptr, + .parse_cs = NULL, + .emit_ib = uvd_v6_0_ring_emit_ib, + .emit_fence = uvd_v6_0_ring_emit_fence, + .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush, + .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync, + .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, + .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate, .test_ring = uvd_v6_0_ring_test_ring, - .test_ib = uvd_v6_0_ring_test_ib, + .test_ib = amdgpu_uvd_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_uvd_ring_begin_use, + .end_use = amdgpu_uvd_ring_end_use, }; static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) { - adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs; + if (adev->asic_type >= CHIP_POLARIS10) { + adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs; + DRM_INFO("UVD is enabled in VM mode\n"); + } else { + adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs; + DRM_INFO("UVD is enabled in physical mode\n"); + } } static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 45d92ac..80a37a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -594,6 +594,8 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = { .test_ib = amdgpu_vce_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vce_ring_begin_use, + .end_use = amdgpu_vce_ring_end_use, }; static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 30e8099..c271abf 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -43,6 +43,7 @@ #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 +#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 #define VCE_V3_0_FW_SIZE (384 * 1024) #define VCE_V3_0_STACK_SIZE (64 * 1024) @@ -51,6 +52,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); static void 
vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); +static int vce_v3_0_wait_for_idle(void *handle); /** * vce_v3_0_ring_get_rptr - get read pointer @@ -205,6 +207,32 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, vce_v3_0_override_vce_clock_gating(adev, false); } +static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev) +{ + int i, j; + + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + uint32_t status = RREG32(mmVCE_STATUS); + + if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK) + return 0; + mdelay(10); + } + + DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + WREG32_P(mmVCE_SOFT_RESET, 0, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + } + + return -ETIMEDOUT; +} + /** * vce_v3_0_start - start VCE block * @@ -215,11 +243,24 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, static int vce_v3_0_start(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - int idx, i, j, r; + int idx, r; + + ring = &adev->vce.ring[0]; + WREG32(mmVCE_RB_RPTR, ring->wptr); + WREG32(mmVCE_RB_WPTR, ring->wptr); + WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); + WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); + + ring = &adev->vce.ring[1]; + WREG32(mmVCE_RB_RPTR2, ring->wptr); + WREG32(mmVCE_RB_WPTR2, ring->wptr); + WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); + WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); mutex_lock(&adev->grbm_idx_mutex); for (idx = 0; idx < 2; ++idx) { - if (adev->vce.harvest_config & (1 << idx)) continue; @@ -233,48 +274,24 @@ static int vce_v3_0_start(struct amdgpu_device *adev) vce_v3_0_mc_resume(adev, idx); - /* set BUSY flag */ - WREG32_P(mmVCE_STATUS, 1, ~1); + WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK, + ~VCE_STATUS__JOB_BUSY_MASK); + if (adev->asic_type >= CHIP_STONEY) WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001); else WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); - WREG32_P(mmVCE_SOFT_RESET, - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - - mdelay(100); - WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - for (i = 0; i < 10; ++i) { - uint32_t status; - for (j = 0; j < 100; ++j) { - status = RREG32(mmVCE_STATUS); - if (status & 2) - break; - mdelay(10); - } - r = 0; - if (status & 2) - break; - - DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); - WREG32_P(mmVCE_SOFT_RESET, - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - mdelay(10); - WREG32_P(mmVCE_SOFT_RESET, 0, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - mdelay(10); - r = -1; - } + mdelay(100); + + r = vce_v3_0_firmware_loaded(adev); /* clear BUSY flag */ - WREG32_P(mmVCE_STATUS, 0, ~1); + WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); /* Set Clock-Gating off */ if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) @@ -290,19 +307,46 @@ static int vce_v3_0_start(struct amdgpu_device *adev) WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); mutex_unlock(&adev->grbm_idx_mutex); - ring = &adev->vce.ring[0]; - WREG32(mmVCE_RB_RPTR, ring->wptr); - WREG32(mmVCE_RB_WPTR, ring->wptr); - WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); - WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); 
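
vce_v3_0_start() no longer open-codes its firmware wait; the new vce_v3_0_firmware_loaded() above polls VCE_STATUS for the FW_LOADED bit up to 100 times per attempt, kicks the ECPU through a soft reset between attempts, and gives up with -ETIMEDOUT after 10 attempts. The same control flow, reduced to a plain C sketch with stubbed callbacks (the predicate and reset function here are stand-ins, not driver code):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int wait_for_fw(bool (*loaded)(void), void (*reset_ecpu)(void))
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			if (loaded())
				return 0;
			/* the driver sleeps here: mdelay(10) */
		}
		reset_ecpu();	/* toggle ECPU soft reset, then retry */
	}
	return -ETIMEDOUT;
}

static int polls;
static bool fake_loaded(void) { return ++polls > 150; }
static void fake_reset(void) { }

int main(void)
{
	int rc = wait_for_fw(fake_loaded, fake_reset);

	printf("rc=%d after %d polls\n", rc, polls);
	return 0;
}
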
- WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); + return 0; +} - ring = &adev->vce.ring[1]; - WREG32(mmVCE_RB_RPTR2, ring->wptr); - WREG32(mmVCE_RB_WPTR2, ring->wptr); - WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); - WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); - WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); +static int vce_v3_0_stop(struct amdgpu_device *adev) +{ + int idx; + + mutex_lock(&adev->grbm_idx_mutex); + for (idx = 0; idx < 2; ++idx) { + if (adev->vce.harvest_config & (1 << idx)) + continue; + + if (idx == 0) + WREG32_P(mmGRBM_GFX_INDEX, 0, + ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + else + WREG32_P(mmGRBM_GFX_INDEX, + GRBM_GFX_INDEX__VCE_INSTANCE_MASK, + ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + + if (adev->asic_type >= CHIP_STONEY) + WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); + else + WREG32_P(mmVCE_VCPU_CNTL, 0, + ~VCE_VCPU_CNTL__CLK_EN_MASK); + /* hold on ECPU */ + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + + /* clear BUSY flag */ + WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); + + /* Set Clock-Gating off */ + if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) + vce_v3_0_set_vce_sw_clock_gating(adev, false); + } + + WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + mutex_unlock(&adev->grbm_idx_mutex); return 0; } @@ -441,7 +485,14 @@ static int vce_v3_0_hw_init(void *handle) static int vce_v3_0_hw_fini(void *handle) { - return 0; + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = vce_v3_0_wait_for_idle(handle); + if (r) + return r; + + return vce_v3_0_stop(adev); } static int vce_v3_0_suspend(void *handle) @@ -604,6 +655,18 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev, return 0; } +static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable) +{ + u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); + + if (enable) + tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; + else + tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; + + WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp); +} + static int vce_v3_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -611,6 +674,9 @@ static int vce_v3_0_set_clockgating_state(void *handle, bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; int i; + if (adev->asic_type == CHIP_POLARIS10) + vce_v3_set_bypass_mode(adev, enable); + if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) return 0; @@ -701,6 +767,8 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = { .test_ib = amdgpu_vce_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vce_ring_begin_use, + .end_use = amdgpu_vce_ring_end_use, }; static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index a65c960..03a31c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -203,6 +203,29 @@ static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v) spin_unlock_irqrestore(&adev->didt_idx_lock, flags); } +static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->gc_cac_idx_lock, flags); + WREG32(mmGC_CAC_IND_INDEX, (reg)); + r = RREG32(mmGC_CAC_IND_DATA); + spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags); + return r; +} + +static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->gc_cac_idx_lock, flags); + WREG32(mmGC_CAC_IND_INDEX, (reg)); + WREG32(mmGC_CAC_IND_DATA, (v)); + spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags); +} + + static const u32 tonga_mgcg_cgcg_init[] = { mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100, @@ -533,12 +556,12 @@ static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num, mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - gfx_v8_0_select_se_sh(adev, se_num, sh_num); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); return val; } @@ -597,7 +620,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num, return -EINVAL; } -static void vi_gpu_pci_config_reset(struct amdgpu_device *adev) +static int vi_gpu_pci_config_reset(struct amdgpu_device *adev) { u32 i; @@ -612,11 +635,14 @@ static void vi_gpu_pci_config_reset(struct amdgpu_device *adev) /* wait for asic to come out of reset */ for (i = 0; i < adev->usec_timeout; i++) { - if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) - break; + if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) { + /* enable BM */ + pci_set_master(adev->pdev); + return 0; + } udelay(1); } - + return -EINVAL; } static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung) @@ -642,13 +668,15 @@ static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hun */ static int vi_asic_reset(struct amdgpu_device *adev) { + int r; + vi_set_bios_scratch_engine_hung(adev, true); - vi_gpu_pci_config_reset(adev); + r = vi_gpu_pci_config_reset(adev); vi_set_bios_scratch_engine_hung(adev, false); - return 0; + return r; } static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock, @@ -1133,9 +1161,6 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = .set_uvd_clocks = &vi_set_uvd_clocks, .set_vce_clocks = &vi_set_vce_clocks, .get_virtual_caps = &vi_get_virtual_caps, - /* these should be moved to their own ip modules */ - .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, - .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle, }; static int 
vi_common_early_init(void *handle) @@ -1156,6 +1181,8 @@ static int vi_common_early_init(void *handle) adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg; adev->didt_rreg = &vi_didt_rreg; adev->didt_wreg = &vi_didt_wreg; + adev->gc_cac_rreg = &vi_gc_cac_rreg; + adev->gc_cac_wreg = &vi_gc_cac_wreg; adev->asic_funcs = &vi_asic_funcs; @@ -1229,12 +1256,18 @@ static int vi_common_early_init(void *handle) adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG | AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | + AMD_CG_SUPPORT_GFX_RLC_LS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_GFX_CGTS | + AMD_CG_SUPPORT_GFX_MGLS | + AMD_CG_SUPPORT_GFX_CGTS_LS | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_BIF_LS | AMD_CG_SUPPORT_HDP_MGCG | AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS; - adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x1; break; default: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index ec4036a..a625b91 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -187,12 +187,12 @@ int init_pipelines(struct device_queue_manager *dqm, unsigned int get_first_pipe(struct device_queue_manager *dqm); unsigned int get_pipes_num(struct device_queue_manager *dqm); -extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) +static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) { return (pdd->lds_base >> 16) & 0xFF; } -extern inline unsigned int +static inline unsigned int get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd) { return (pdd->lds_base >> 60) & 0x0E; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index d0d5f4b..80113c3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -617,10 +617,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd); int kfd_init_apertures(struct kfd_process *process); /* Queue Context Management */ -inline uint32_t lower_32(uint64_t x); -inline uint32_t upper_32(uint64_t x); struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd); -inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m); int init_queue(struct queue **q, struct queue_properties properties); void uninit_queue(struct queue *q); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 7708d90..4f3849a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -63,13 +63,12 @@ static struct kfd_process *create_process(const struct task_struct *thread); void kfd_process_create_wq(void) { if (!kfd_process_wq) - kfd_process_wq = create_workqueue("kfd_process_wq"); + kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0); } void kfd_process_destroy_wq(void) { if (kfd_process_wq) { - flush_workqueue(kfd_process_wq); destroy_workqueue(kfd_process_wq); kfd_process_wq = NULL; } @@ -330,6 +329,7 @@ err_process_pqm_init: synchronize_rcu(); mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm); err_mmu_notifier: + mutex_destroy(&process->mutex); kfd_pasid_free(process->pasid); err_alloc_pasid: kfree(process->queues); diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h index 7c2a916..5eb895f 100644 --- a/drivers/gpu/drm/amd/include/amd_pcie.h +++ b/drivers/gpu/drm/amd/include/amd_pcie.h @@ -37,6 +37,13 @@ #define 
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK 0x0000FFFF #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT 0 +/* gen: chipset 1/2, asic 1/2/3 */ +#define AMDGPU_DEFAULT_PCIE_GEN_MASK (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 \ + | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 \ + | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 \ + | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 \ + | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3) + /* Following flags shows PCIe lane width switch supported in driver which are decided by chipset and ASIC */ #define CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 0x00010000 #define CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 0x00020000 @@ -47,4 +54,11 @@ #define CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 0x00400000 #define CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT 16 +/* 1/2/4/8/16 lanes */ +#define AMDGPU_DEFAULT_PCIE_MLW_MASK (CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 \ + | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 \ + | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 \ + | CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 \ + | CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) + #endif diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index afce1ed..a74a0d2 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -26,15 +26,6 @@ #define AMD_MAX_USEC_TIMEOUT 100000 /* 100 ms */ /* -* Supported GPU families (aligned with amdgpu_drm.h) -*/ -#define AMD_FAMILY_UNKNOWN 0 -#define AMD_FAMILY_CI 120 /* Bonaire, Hawaii */ -#define AMD_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */ -#define AMD_FAMILY_VI 130 /* Iceland, Tonga */ -#define AMD_FAMILY_CZ 135 /* Carrizo */ - -/* * Supported ASIC types */ enum amd_asic_type { @@ -120,6 +111,8 @@ enum amd_powergating_state { #define AMD_PG_SUPPORT_SDMA (1 << 8) #define AMD_PG_SUPPORT_ACP (1 << 9) #define AMD_PG_SUPPORT_SAMU (1 << 10) +#define AMD_PG_SUPPORT_GFX_QUICK_MG (1 << 11) +#define AMD_PG_SUPPORT_GFX_PIPELINE (1 << 12) enum amd_pm_state_type { /* not used for dpm */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h index 2933297..809759f 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h @@ -27,6 +27,7 @@ #define mmMM_INDEX 0x0 #define mmMM_INDEX_HI 0x6 #define mmMM_DATA 0x1 +#define mmCC_BIF_BX_STRAP2 0x152A #define mmBIF_MM_INDACCESS_CNTL 0x1500 #define mmBIF_DOORBELL_APER_EN 0x1501 #define mmBUS_CNTL 0x1508 diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h index ebaf67b..90ff7c8 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h @@ -2823,4 +2823,7 @@ #define mmDC_EDC_CSINVOC_CNT 0x3192 #define mmDC_EDC_RESTORE_CNT 0x3193 +#define mmGC_CAC_IND_INDEX 0x129a +#define mmGC_CAC_IND_DATA 0x129b + #endif /* GFX_8_0_D_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h index 7d72245..4070ca3 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h @@ -8730,8 +8730,6 @@ #define RLC_GPM_STAT__DYN_CU_POWERING_DOWN__SHIFT 0x10 #define RLC_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x20000 #define RLC_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11 -#define RLC_GPM_STAT__RESERVED_MASK 0xfc0000 -#define RLC_GPM_STAT__RESERVED__SHIFT 0x12 #define RLC_GPM_STAT__PG_ERROR_STATUS_MASK 0xff000000 #define RLC_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18 #define 
RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x3f @@ -8764,8 +8762,10 @@ #define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x12 #define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE_MASK 0x80000 #define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE__SHIFT 0x13 -#define RLC_PG_CNTL__RESERVED1_MASK 0xf00000 -#define RLC_PG_CNTL__RESERVED1__SHIFT 0x14 +#define RLC_PG_CNTL__QUICK_PG_ENABLE_MASK 0x100000 +#define RLC_PG_CNTL__QUICK_PG_ENABLE__SHIFT 0x14 +#define RLC_PG_CNTL__RESERVED1_MASK 0xe00000 +#define RLC_PG_CNTL__RESERVED1__SHIFT 0x15 #define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY_MASK 0xff #define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY__SHIFT 0x0 #define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY_MASK 0xff00 @@ -9102,8 +9102,6 @@ #define RLC_GPM_LOG_CONT__CONT__SHIFT 0x0 #define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK 0xff #define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT 0x0 -#define RLC_PG_DELAY_3__RESERVED_MASK 0xffffff00 -#define RLC_PG_DELAY_3__RESERVED__SHIFT 0x8 #define RLC_GPM_INT_DISABLE_TH0__DISABLE_MASK 0xffffffff #define RLC_GPM_INT_DISABLE_TH0__DISABLE__SHIFT 0x0 #define RLC_GPM_INT_DISABLE_TH1__DISABLE_MASK 0xffffffff @@ -9124,14 +9122,8 @@ #define RLC_SRM_DEBUG_SELECT__RESERVED__SHIFT 0x8 #define RLC_SRM_DEBUG__DATA_MASK 0xffffffff #define RLC_SRM_DEBUG__DATA__SHIFT 0x0 -#define RLC_SRM_ARAM_ADDR__ADDR_MASK 0x3ff -#define RLC_SRM_ARAM_ADDR__ADDR__SHIFT 0x0 -#define RLC_SRM_ARAM_ADDR__RESERVED_MASK 0xfffffc00 -#define RLC_SRM_ARAM_ADDR__RESERVED__SHIFT 0xa #define RLC_SRM_ARAM_DATA__DATA_MASK 0xffffffff #define RLC_SRM_ARAM_DATA__DATA__SHIFT 0x0 -#define RLC_SRM_DRAM_ADDR__ADDR_MASK 0x3ff -#define RLC_SRM_DRAM_ADDR__ADDR__SHIFT 0x0 #define RLC_SRM_DRAM_ADDR__RESERVED_MASK 0xfffffc00 #define RLC_SRM_DRAM_ADDR__RESERVED__SHIFT 0xa #define RLC_SRM_DRAM_DATA__DATA_MASK 0xffffffff @@ -17946,8 +17938,6 @@ #define VGT_TESS_DISTRIBUTION__ACCUM_TRI__SHIFT 0x8 #define VGT_TESS_DISTRIBUTION__ACCUM_QUAD_MASK 0xff0000 #define VGT_TESS_DISTRIBUTION__ACCUM_QUAD__SHIFT 0x10 -#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT_MASK 0xff000000 -#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT__SHIFT 0x18 #define VGT_TF_RING_SIZE__SIZE_MASK 0xffff #define VGT_TF_RING_SIZE__SIZE__SHIFT 0x0 #define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x1 @@ -20502,8 +20492,6 @@ #define DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT 0x4 #define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 #define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 -#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6 #define DIDT_SQ_CTRL1__MIN_POWER_MASK 0xffff #define DIDT_SQ_CTRL1__MIN_POWER__SHIFT 0x0 #define DIDT_SQ_CTRL1__MAX_POWER_MASK 0xffff0000 @@ -20558,8 +20546,6 @@ #define DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT 0x4 #define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 #define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 -#define DIDT_DB_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_DB_CTRL0__UNUSED_0__SHIFT 0x6 #define DIDT_DB_CTRL1__MIN_POWER_MASK 0xffff #define DIDT_DB_CTRL1__MIN_POWER__SHIFT 0x0 #define DIDT_DB_CTRL1__MAX_POWER_MASK 0xffff0000 @@ -20614,8 +20600,6 @@ #define DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT 0x4 #define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 #define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 -#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6 #define DIDT_TD_CTRL1__MIN_POWER_MASK 0xffff #define DIDT_TD_CTRL1__MIN_POWER__SHIFT 0x0 #define DIDT_TD_CTRL1__MAX_POWER_MASK 0xffff0000 @@ -20670,8 +20654,6 @@ #define DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT 
0x4 #define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 #define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 -#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6 #define DIDT_TCP_CTRL1__MIN_POWER_MASK 0xffff #define DIDT_TCP_CTRL1__MIN_POWER__SHIFT 0x0 #define DIDT_TCP_CTRL1__MAX_POWER_MASK 0xffff0000 @@ -20726,8 +20708,6 @@ #define DIDT_DBR_CTRL0__DIDT_CTRL_RST__SHIFT 0x4 #define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 #define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 -#define DIDT_DBR_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_DBR_CTRL0__UNUSED_0__SHIFT 0x6 #define DIDT_DBR_CTRL1__MIN_POWER_MASK 0xffff #define DIDT_DBR_CTRL1__MIN_POWER__SHIFT 0x0 #define DIDT_DBR_CTRL1__MAX_POWER_MASK 0xffff0000 @@ -20773,4 +20753,84 @@ #define DIDT_DBR_WEIGHT8_11__WEIGHT11_MASK 0xff000000 #define DIDT_DBR_WEIGHT8_11__WEIGHT11__SHIFT 0x18 +#define DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001 +#define DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000 + +#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007e +#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L +#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001 +#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007 + +#define DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L +#define DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d + +#define DIDT_SQ_STALL_CTRL__UNUSED_0_MASK 0xe0000000L +#define DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d + +#define DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L +#define DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000 + +#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL +#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001 +#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L +#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f + +#define DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001L +#define DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000 + +#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007eL +#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L +#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001 +#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007 + +#define DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L +#define DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d + +#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L +#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L +#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006 +#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c + +#define DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L +#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL +#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L + +#define DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000 +#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001 +#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f + +#define DIDT_TD_STALL_CTRL__UNUSED_0_MASK 0xe0000000L +#define DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d + +#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L +#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L +#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006 +#define 
DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c + +#define DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001L +#define DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000 + +#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007eL +#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L +#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001 +#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007 + +#define DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L +#define DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d + +#define DIDT_TCP_STALL_CTRL__UNUSED_0_MASK 0xe0000000L +#define DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d + +#define DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L +#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL +#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L +#define DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000 +#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001 +#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f + +#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L +#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L +#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006 +#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c + #endif /* GFX_8_0_SH_MASK_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h index 6f6fb34..ec69869 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h @@ -111,6 +111,8 @@ #define mmUVD_MIF_RECON1_ADDR_CONFIG 0x39c5 #define ixUVD_MIF_SCLR_ADDR_CONFIG 0x4 #define mmUVD_JPEG_ADDR_CONFIG 0x3a1f +#define mmUVD_GP_SCRATCH8 0x3c0a +#define mmUVD_GP_SCRATCH9 0x3c0b #define mmUVD_GP_SCRATCH4 0x3d38 #endif /* UVD_6_0_D_H */ diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index 7464daf..b86aba9 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -49,6 +49,7 @@ enum cgs_ind_reg { CGS_IND_REG__SMC, CGS_IND_REG__UVD_CTX, CGS_IND_REG__DIDT, + CGS_IND_REG_GC_CAC, CGS_IND_REG__AUDIO_ENDPT }; @@ -112,20 +113,23 @@ enum cgs_system_info_id { CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1, CGS_SYSTEM_INFO_PCIE_GEN_INFO, CGS_SYSTEM_INFO_PCIE_MLW, + CGS_SYSTEM_INFO_PCIE_DEV, + CGS_SYSTEM_INFO_PCIE_REV, CGS_SYSTEM_INFO_CG_FLAGS, CGS_SYSTEM_INFO_PG_FLAGS, CGS_SYSTEM_INFO_GFX_CU_INFO, + CGS_SYSTEM_INFO_GFX_SE_INFO, CGS_SYSTEM_INFO_ID_MAXIMUM, }; struct cgs_system_info { - uint64_t size; - uint64_t info_id; + uint64_t size; + enum cgs_system_info_id info_id; union { - void *ptr; - uint64_t value; + void *ptr; + uint64_t value; }; - uint64_t padding[13]; + uint64_t padding[13]; }; /* @@ -158,6 +162,10 @@ struct cgs_firmware_info { uint16_t feature_version; uint32_t image_size; uint64_t mc_addr; + + /* only for smc firmware */ + uint32_t ucode_start_address; + void *kptr; }; @@ -189,7 +197,6 @@ typedef unsigned long cgs_handle_t; struct cgs_acpi_method_argument { uint32_t type; - uint32_t method_length; uint32_t data_length; union{ uint32_t value; diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index e629f8a..abbb658 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ 
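
The *_MASK / *__SHIFT pairs added to gfx_8_0_sh_mask.h above are consumed with the usual mask-and-shift idiom. A self-contained example using one of the new DIDT fields; the written field value is arbitrary and only the arithmetic is the point:

#include <stdint.h>
#include <stdio.h>

#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK   0x0000007eu
#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001

static uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0;

	reg = field_set(reg, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,
			DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x2a);
	printf("reg=0x%08x field=%u\n", reg,
	       field_get(reg, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,
			 DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT));
	return 0;
}
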
-176,7 +176,7 @@ static int pp_hw_fini(void *handle) static bool pp_is_idle(void *handle) { - return 0; + return false; } static int pp_wait_for_idle(void *handle) @@ -536,6 +536,10 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, case AMD_PP_EVENT_COMPLETE_INIT: ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); break; + case AMD_PP_EVENT_READJUST_POWER_STATE: + pp_handle->hwmgr->current_ps = pp_handle->hwmgr->boot_ps; + ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); + break; default: break; } @@ -740,12 +744,12 @@ static int pp_dpm_get_pp_table(void *handle, char **table) PP_CHECK_HW(hwmgr); - if (hwmgr->hwmgr_func->get_pp_table == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); - return 0; - } + if (!hwmgr->soft_pp_table) + return -EINVAL; - return hwmgr->hwmgr_func->get_pp_table(hwmgr, table); + *table = (char *)hwmgr->soft_pp_table; + + return hwmgr->soft_pp_table_size; } static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) @@ -759,12 +763,23 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) PP_CHECK_HW(hwmgr); - if (hwmgr->hwmgr_func->set_pp_table == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); - return 0; + if (!hwmgr->hardcode_pp_table) { + hwmgr->hardcode_pp_table = + kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); + + if (!hwmgr->hardcode_pp_table) + return -ENOMEM; + + /* to avoid powerplay crash when hardcode pptable is empty */ + memcpy(hwmgr->hardcode_pp_table, hwmgr->soft_pp_table, + hwmgr->soft_pp_table_size); } - return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size); + memcpy(hwmgr->hardcode_pp_table, buf, size); + + hwmgr->soft_pp_table = hwmgr->hardcode_pp_table; + + return amd_powerplay_reset(handle); } static int pp_dpm_force_clock_level(void *handle, @@ -806,6 +821,82 @@ static int pp_dpm_print_clock_levels(void *handle, return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf); } +static int pp_dpm_get_sclk_od(void *handle) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + PP_CHECK_HW(hwmgr); + + if (hwmgr->hwmgr_func->get_sclk_od == NULL) { + printk(KERN_INFO "%s was not implemented.\n", __func__); + return 0; + } + + return hwmgr->hwmgr_func->get_sclk_od(hwmgr); +} + +static int pp_dpm_set_sclk_od(void *handle, uint32_t value) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + PP_CHECK_HW(hwmgr); + + if (hwmgr->hwmgr_func->set_sclk_od == NULL) { + printk(KERN_INFO "%s was not implemented.\n", __func__); + return 0; + } + + return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value); +} + +static int pp_dpm_get_mclk_od(void *handle) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + PP_CHECK_HW(hwmgr); + + if (hwmgr->hwmgr_func->get_mclk_od == NULL) { + printk(KERN_INFO "%s was not implemented.\n", __func__); + return 0; + } + + return hwmgr->hwmgr_func->get_mclk_od(hwmgr); +} + +static int pp_dpm_set_mclk_od(void *handle, uint32_t value) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + PP_CHECK_HW(hwmgr); + + if (hwmgr->hwmgr_func->set_mclk_od == NULL) { + printk(KERN_INFO "%s was not implemented.\n", __func__); + return 0; + } + + return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value); +} + const struct amd_powerplay_funcs pp_dpm_funcs = { 
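
pp_dpm_set_pp_table() above stops delegating to a per-ASIC callback: the first override copies the firmware ("soft") pptable into hwmgr->hardcode_pp_table, the user buffer is then patched into that copy, the active pointer is switched to it, and powerplay is reset. A rough user-space model of that copy-on-first-write flow; error handling and the reset step are reduced to stubs and buffer sizes are assumed consistent:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct hwmgr {
	const void *soft_pp_table;	/* firmware defaults, later points at the copy */
	size_t soft_pp_table_size;
	void *hardcode_pp_table;	/* writable override, allocated on first use */
};

static int set_pp_table(struct hwmgr *h, const void *buf, size_t size)
{
	if (!h->hardcode_pp_table) {
		h->hardcode_pp_table = malloc(h->soft_pp_table_size);
		if (!h->hardcode_pp_table)
			return -ENOMEM;
		/* seed the copy so a partial override still has sane defaults */
		memcpy(h->hardcode_pp_table, h->soft_pp_table,
		       h->soft_pp_table_size);
	}
	memcpy(h->hardcode_pp_table, buf, size);
	h->soft_pp_table = h->hardcode_pp_table;
	return 0;	/* the driver follows this with amd_powerplay_reset() */
}

int main(void)
{
	static const char defaults[8] = "default";
	struct hwmgr h = { defaults, sizeof(defaults), NULL };

	set_pp_table(&h, "patched", 8);
	printf("%s\n", (const char *)h.soft_pp_table);
	free(h.hardcode_pp_table);
	return 0;
}
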
.get_temperature = pp_dpm_get_temperature, .load_firmware = pp_dpm_load_fw, @@ -828,6 +919,10 @@ const struct amd_powerplay_funcs pp_dpm_funcs = { .set_pp_table = pp_dpm_set_pp_table, .force_clock_level = pp_dpm_force_clock_level, .print_clock_levels = pp_dpm_print_clock_levels, + .get_sclk_od = pp_dpm_get_sclk_od, + .set_sclk_od = pp_dpm_set_sclk_od, + .get_mclk_od = pp_dpm_get_mclk_od, + .set_mclk_od = pp_dpm_set_mclk_od, }; static int amd_pp_instance_init(struct amd_pp_init *pp_init, @@ -909,6 +1004,44 @@ int amd_powerplay_fini(void *handle) return 0; } +int amd_powerplay_reset(void *handle) +{ + struct pp_instance *instance = (struct pp_instance *)handle; + struct pp_eventmgr *eventmgr; + struct pem_event_data event_data = { {0} }; + int ret; + + if (instance == NULL) + return -EINVAL; + + eventmgr = instance->eventmgr; + if (!eventmgr || !eventmgr->pp_eventmgr_fini) + return -EINVAL; + + eventmgr->pp_eventmgr_fini(eventmgr); + + ret = pp_sw_fini(handle); + if (ret) + return ret; + + kfree(instance->hwmgr->ps); + + ret = pp_sw_init(handle); + if (ret) + return ret; + + hw_init_power_state_table(instance->hwmgr); + + if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL) + return -EINVAL; + + ret = eventmgr->pp_eventmgr_init(eventmgr); + if (ret) + return ret; + + return pem_handle_event(eventmgr, AMD_PP_EVENT_COMPLETE_INIT, &event_data); +} + /* export this function to DAL */ int amd_powerplay_display_configuration_change(void *handle, diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c index d6635cc..635fc4b 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c @@ -30,7 +30,6 @@ static const pem_event_action * const initialize_event[] = { system_config_tasks, setup_asic_tasks, enable_dynamic_state_management_tasks, - enable_clock_power_gatings_tasks, get_2d_performance_state_tasks, set_performance_state_tasks, initialize_thermal_controller_tasks, @@ -140,7 +139,6 @@ static const pem_event_action * const resume_event[] = { setup_asic_tasks, enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */ enable_dynamic_state_management_tasks, - enable_clock_power_gatings_tasks, enable_disable_bapm_tasks, initialize_thermal_controller_tasks, get_2d_performance_state_tasks, diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c index 5cd1234..b6f45fd 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c @@ -132,8 +132,7 @@ int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struc int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) { - /* TODO */ - return 0; + return phm_disable_dynamic_state_management(eventmgr->hwmgr); } int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index 436fc16..2028980 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -177,12 +177,12 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) cz_dpm_powerdown_uvd(hwmgr); } else { cz_dpm_powerup_uvd(hwmgr); - 
cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_GATE); cgs_set_powergating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_UNGATE); + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); cz_dpm_update_uvd_dpm(hwmgr, false); } @@ -206,25 +206,26 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_GATE); cz_enable_disable_vce_dpm(hwmgr, false); - /* TODO: to figure out why vce can't be poweroff*/ + cz_dpm_powerdown_vce(hwmgr); cz_hwmgr->vce_power_gated = true; } else { cz_dpm_powerup_vce(hwmgr); cz_hwmgr->vce_power_gated = false; - cgs_set_clockgating_state( - hwmgr->device, - AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); cgs_set_powergating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_UNGATE); + cgs_set_clockgating_state( + hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); cz_dpm_update_vce_dpm(hwmgr); cz_enable_disable_vce_dpm(hwmgr, true); return 0; } } } else { + cz_hwmgr->vce_power_gated = bgate; cz_dpm_update_vce_dpm(hwmgr); cz_enable_disable_vce_dpm(hwmgr, !bgate); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 1f14c47..8cc0df9 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1167,9 +1167,9 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, cz_ps->action = cz_current_ps->action; - if ((force_high == false) && (cz_ps->action == FORCE_HIGH)) + if (!force_high && (cz_ps->action == FORCE_HIGH)) cz_ps->action = CANCEL_FORCE_HIGH; - else if ((force_high == true) && (cz_ps->action != FORCE_HIGH)) + else if (force_high && (cz_ps->action != FORCE_HIGH)) cz_ps->action = FORCE_HIGH; else cz_ps->action = DO_NOTHING; @@ -1180,6 +1180,13 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { int result = 0; + struct cz_hwmgr *data; + + data = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + hwmgr->backend = data; result = cz_initialize_dpm_defaults(hwmgr); if (result != 0) { @@ -1649,7 +1656,7 @@ static void cz_hw_print_display_cfg( struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); uint32_t data = 0; - if (hw_data->cc6_settings.cc6_setting_changed == true) { + if (hw_data->cc6_settings.cc6_setting_changed) { hw_data->cc6_settings.cc6_setting_changed = false; @@ -1909,15 +1916,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = { int cz_hwmgr_init(struct pp_hwmgr *hwmgr) { - struct cz_hwmgr *cz_hwmgr; - int ret = 0; - - cz_hwmgr = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL); - if (cz_hwmgr == NULL) - return -ENOMEM; - - hwmgr->backend = cz_hwmgr; hwmgr->hwmgr_func = &cz_hwmgr_funcs; hwmgr->pptable_func = &pptable_funcs; - return ret; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c index e1b649b..5afe820 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c @@ -56,7 +56,7 @@ int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) fiji_update_uvd_dpm(hwmgr, false); cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_UNGATE); + AMD_CG_STATE_UNGATE); } return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c index 92912ab..120a9e2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c @@ -581,25 +581,24 @@ static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr, static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) { - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (data->soft_pp_table) { - kfree(data->soft_pp_table); - data->soft_pp_table = NULL; - } - return phm_hwmgr_backend_fini(hwmgr); } static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_hwmgr *data; uint32_t i; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); bool stay_in_boot; int result; + data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + hwmgr->backend = data; + data->dll_default_on = false; data->sram_end = SMC_RAM_END; @@ -699,7 +698,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) if (0 == result) { struct cgs_system_info sys_info = {0}; - data->is_tlu_enabled = 0; + data->is_tlu_enabled = false; hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = FIJI_MAX_HARDWARE_POWERLEVELS; hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; @@ -734,7 +733,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; result = cgs_query_system_info(hwmgr->device, &sys_info); if (result) - data->pcie_gen_cap = 0x30007; + data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; else data->pcie_gen_cap = (uint32_t)sys_info.value; if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) @@ -743,7 +742,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; result = cgs_query_system_info(hwmgr->device, &sys_info); if (result) - data->pcie_lane_cap = 0x2f0000; + data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; else data->pcie_lane_cap = (uint32_t)sys_info.value; } else { @@ -1236,6 +1235,34 @@ static int fiji_program_voting_clients(struct pp_hwmgr *hwmgr) return 0; } +static int fiji_clear_voting_clients(struct pp_hwmgr *hwmgr) +{ + /* Reset voting clients before disabling DPM */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_0, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_1, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_2, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_3, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_4, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_5, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_6, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_7, 0); + + return 0; +} + /** * Get the location of various tables inside the FW image. * @@ -1363,6 +1390,17 @@ static int fiji_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, } /** +* Call SMC to reset S0/S1 to S1 and Reset SMIO to initial value +* +* @param hwmgr the address of the powerplay hardware manager. 
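
The fiji_hwmgr_backend_init() hunk above also swaps the magic PCIe fallback constants (0x30007 and 0x2f0000) for the named AMDGPU_DEFAULT_PCIE_GEN_MASK / AMDGPU_DEFAULT_PCIE_MLW_MASK defaults. The shape of that query-with-fallback is sketched below with a stubbed CGS query; the literal used is simply the constant the named mask replaces in this hunk, not an independently derived value.

#include <stdint.h>
#include <stdio.h>

#define DEFAULT_PCIE_GEN_MASK 0x30007u	/* the literal replaced by AMDGPU_DEFAULT_PCIE_GEN_MASK */

/* stand-in for cgs_query_system_info(); pretend the platform query failed */
static int query_pcie_gen_info(uint64_t *value)
{
	(void)value;
	return -1;
}

int main(void)
{
	uint64_t value = 0;
	uint32_t pcie_gen_cap;

	if (query_pcie_gen_info(&value))
		pcie_gen_cap = DEFAULT_PCIE_GEN_MASK;	/* conservative default */
	else
		pcie_gen_cap = (uint32_t)value;

	printf("pcie_gen_cap = 0x%x\n", pcie_gen_cap);
	return 0;
}
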
+* @return if success then 0; +*/ +static int fiji_reset_to_default(struct pp_hwmgr *hwmgr) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults); +} + +/** * Initial switch from ARB F0->F1 * * @param hwmgr the address of the powerplay hardware manager. @@ -1375,6 +1413,21 @@ static int fiji_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr) MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); } +static int fiji_force_switch_to_arbf0(struct pp_hwmgr *hwmgr) +{ + uint32_t tmp; + + tmp = (cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixSMC_SCRATCH9) & + 0x0000ff00) >> 8; + + if (tmp == MC_CG_ARB_FREQ_F0) + return 0; + + return fiji_copy_and_switch_arb_sets(hwmgr, + tmp, MC_CG_ARB_FREQ_F0); +} + static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr, struct fiji_single_dpm_table *dpm_table, uint32_t count) { @@ -1397,7 +1450,7 @@ static void fiji_setup_pcie_table_entry( { dpm_table->dpm_levels[index].value = pcie_gen; dpm_table->dpm_levels[index].param1 = pcie_lanes; - dpm_table->dpm_levels[index].enabled = 1; + dpm_table->dpm_levels[index].enabled = true; } static int fiji_setup_default_pcie_table(struct pp_hwmgr *hwmgr) @@ -1609,7 +1662,6 @@ static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr, { uint32_t count; uint8_t index; - int result = 0; struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -1631,7 +1683,7 @@ static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr, VOLTAGE_SCALE)) / 25); } - return result; + return 0; } /** @@ -3177,6 +3229,17 @@ static int fiji_enable_ulv(struct pp_hwmgr *hwmgr) return 0; } +static int fiji_disable_ulv(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_ulv_parm *ulv = &(data->ulv); + + if (ulv->ulv_supported) + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV); + + return 0; +} + static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, @@ -3197,6 +3260,21 @@ static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) return 0; } +static int fiji_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) { + if (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MASTER_DeepSleep_OFF)) { + PP_ASSERT_WITH_CODE(false, + "Attempt to disable Master Deep Sleep switch failed!", + return -1); + } + } + + return 0; +} + static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) { struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); @@ -3357,6 +3435,70 @@ static int fiji_start_dpm(struct pp_hwmgr *hwmgr) return 0; } +static int fiji_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + /* disable SCLK dpm */ + if (!data->sclk_dpm_key_disabled) + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_DPM_Disable) == 0), + "Failed to disable SCLK DPM!", + return -1); + + /* disable MCLK dpm */ + if (!data->mclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, 1) == 0), + "Failed to force MCLK DPM0!", + return -1); + + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_Disable) == 0), + "Failed to disable MCLK DPM!", + return -1); + } + + return 0; +} + +static 
int fiji_stop_dpm(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + /* disable general power management */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + GLOBAL_PWRMGT_EN, 0); + /* disable sclk deep sleep */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, + DYNAMIC_PM_EN, 0); + + /* disable PCIE dpm */ + if (!data->pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_Disable) == 0), + "Failed to disable pcie DPM during DPM Stop Function!", + return -1); + } + + if (fiji_disable_sclk_mclk_dpm(hwmgr)) { + printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!"); + return -1; + } + + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_Voltage_Cntl_Disable) == 0), + "Failed to disable voltage DPM during DPM Stop Function!", + return -1); + + return 0; +} + static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources) { @@ -3415,6 +3557,23 @@ static int fiji_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); } +static int fiji_disable_auto_throttle_source(struct pp_hwmgr *hwmgr, + PHM_AutoThrottleSource source) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + if (data->active_auto_throttle_sources & (1 << source)) { + data->active_auto_throttle_sources &= ~(1 << source); + fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); + } + return 0; +} + +static int fiji_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) +{ + return fiji_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); +} + static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr) { int tmp_result, result = 0; @@ -3529,6 +3688,64 @@ static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr) return result; } +static int fiji_disable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + int tmp_result, result = 0; + + tmp_result = (fiji_is_dpm_running(hwmgr)) ? 
0 : -1; + PP_ASSERT_WITH_CODE(tmp_result == 0, + "DPM is not running right now, no need to disable DPM!", + return 0); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController)) + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1); + + tmp_result = fiji_disable_power_containment(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable power containment!", result = tmp_result); + + tmp_result = fiji_disable_smc_cac(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable SMC CAC!", result = tmp_result); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_SPLL_SPREAD_SPECTRUM, SSEN, 0); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0); + + tmp_result = fiji_disable_thermal_auto_throttle(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable thermal auto throttle!", result = tmp_result); + + tmp_result = fiji_stop_dpm(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to stop DPM!", result = tmp_result); + + tmp_result = fiji_disable_deep_sleep_master_switch(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable deep sleep master switch!", result = tmp_result); + + tmp_result = fiji_disable_ulv(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable ULV!", result = tmp_result); + + tmp_result = fiji_clear_voting_clients(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to clear voting clients!", result = tmp_result); + + tmp_result = fiji_reset_to_default(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to reset to default!", result = tmp_result); + + tmp_result = fiji_force_switch_to_arbf0(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to force to switch arbf0!", result = tmp_result); + + return result; +} + static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr) { struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); @@ -4171,8 +4388,9 @@ static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->sclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr), - "Trying to freeze SCLK DPM when DPM is disabled",); + PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), + "Trying to freeze SCLK DPM when DPM is disabled", + ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_SCLKDPM_FreezeLevel), "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", @@ -4182,8 +4400,9 @@ static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->mclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr), - "Trying to freeze MCLK DPM when DPM is disabled",); + PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), + "Trying to freeze MCLK DPM when DPM is disabled", + ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_FreezeLevel), "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", @@ -4353,7 +4572,6 @@ static int fiji_trim_single_dpm_states(struct pp_hwmgr *hwmgr, static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct fiji_power_state *fiji_ps) { - int result = 0; struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); uint32_t high_limit_count; @@ -4373,7 +4591,7 @@ static int fiji_trim_dpm_states(struct pp_hwmgr 
*hwmgr, fiji_ps->performance_levels[0].memory_clock, fiji_ps->performance_levels[high_limit_count].memory_clock); - return result; + return 0; } static int fiji_generate_dpm_level_enable_mask( @@ -4632,8 +4850,9 @@ static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr), - "Trying to Unfreeze SCLK DPM when DPM is disabled",); + PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), + "Trying to Unfreeze SCLK DPM when DPM is disabled", + ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel), "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", @@ -4643,8 +4862,9 @@ static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->mclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr), - "Trying to Unfreeze MCLK DPM when DPM is disabled",); + PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), + "Trying to Unfreeze MCLK DPM when DPM is disabled", + ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel), "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", @@ -5071,42 +5291,6 @@ static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr) CG_FDO_CTRL2, FDO_PWM_MODE); } -static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (!data->soft_pp_table) { - data->soft_pp_table = kmemdup(hwmgr->soft_pp_table, - hwmgr->soft_pp_table_size, - GFP_KERNEL); - if (!data->soft_pp_table) - return -ENOMEM; - } - - *table = (char *)&data->soft_pp_table; - - return hwmgr->soft_pp_table_size; -} - -static int fiji_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (!data->soft_pp_table) { - data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); - if (!data->soft_pp_table) - return -ENOMEM; - } - - memcpy(data->soft_pp_table, buf, size); - - hwmgr->soft_pp_table = data->soft_pp_table; - - /* TODO: re-init powerplay to implement modified pptable */ - - return 0; -} - static int fiji_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask) { @@ -5276,12 +5460,96 @@ bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *h return is_update_required; } +static int fiji_get_sclk_od(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct fiji_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + int value; + + value = (sclk_table->dpm_levels[sclk_table->count - 1].value - + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * + 100 / + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return value; +} + +static int fiji_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + struct pp_power_state *ps; + struct fiji_power_state *fiji_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + fiji_ps = 
cast_phw_fiji_power_state(&ps->hardware); + + fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].engine_clock = + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * + value / 100 + + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return 0; +} + +static int fiji_get_mclk_od(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct fiji_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + int value; + + value = (mclk_table->dpm_levels[mclk_table->count - 1].value - + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * + 100 / + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return value; +} + +static int fiji_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + struct pp_power_state *ps; + struct fiji_power_state *fiji_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + fiji_ps = cast_phw_fiji_power_state(&ps->hardware); + + fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].memory_clock = + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * + value / 100 + + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return 0; +} static const struct pp_hwmgr_func fiji_hwmgr_funcs = { .backend_init = &fiji_hwmgr_backend_init, .backend_fini = &fiji_hwmgr_backend_fini, .asic_setup = &fiji_setup_asic_task, .dynamic_state_management_enable = &fiji_enable_dpm_tasks, + .dynamic_state_management_disable = &fiji_disable_dpm_tasks, .force_dpm_level = &fiji_dpm_force_dpm_level, .get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries, .get_power_state_size = &fiji_get_power_state_size, @@ -5314,24 +5582,18 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = { .get_fan_control_mode = fiji_get_fan_control_mode, .check_states_equal = fiji_check_states_equal, .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration, - .get_pp_table = fiji_get_pp_table, - .set_pp_table = fiji_set_pp_table, .force_clock_level = fiji_force_clock_level, .print_clock_levels = fiji_print_clock_levels, + .get_sclk_od = fiji_get_sclk_od, + .set_sclk_od = fiji_set_sclk_od, + .get_mclk_od = fiji_get_mclk_od, + .set_mclk_od = fiji_set_mclk_od, }; int fiji_hwmgr_init(struct pp_hwmgr *hwmgr) { - struct fiji_hwmgr *data; - int ret = 0; - - data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL); - if (data == NULL) - return -ENOMEM; - - hwmgr->backend = data; hwmgr->hwmgr_func = &fiji_hwmgr_funcs; hwmgr->pptable_func = &tonga_pptable_funcs; pp_fiji_thermal_initialize(hwmgr); - return ret; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h index 170edf5..bf67c2a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h @@ -302,9 +302,6 @@ struct fiji_hwmgr { bool pg_acp_init; bool frtc_enabled; bool frtc_status_changed; - - /* soft pptable for re-uploading into smu */ - void *soft_pp_table; }; /* To convert to Q8.8 format for firmware */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c index db23a40..4465845 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c @@ -73,17 +73,18 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) if (!tmp) { phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CAC); fiji_hwmgr->fast_watermark_threshold = 100; - tmp = 1; - fiji_hwmgr->enable_dte_feature = tmp ? false : true; - fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false; - fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false; + if (hwmgr->powercontainment_enabled) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + tmp = 1; + fiji_hwmgr->enable_dte_feature = tmp ? false : true; + fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false; + fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false; + } } } @@ -459,6 +460,23 @@ int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr) return result; } +int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC) && data->cac_enabled) { + int smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_DisableCac)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable CAC in SMC.", result = -1); + + data->cac_enabled = false; + } + return result; +} + int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) { struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); @@ -528,6 +546,48 @@ int fiji_enable_power_containment(struct pp_hwmgr *hwmgr) return result; } +int fiji_disable_power_containment(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment) && + data->power_containment_features) { + int smc_result; + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_TDCLimit) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_TDCLimitDisable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable TDCLimit in SMC.", + result = smc_result); + } + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_DTE) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_DisableDTE)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable DTE in SMC.", + result = smc_result); + } + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_PkgPwrLimit) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable PkgPwrTracking in SMC.", + result = smc_result); + } + data->power_containment_features = 0; + } + + return result; +} + int fiji_power_control_set_level(struct pp_hwmgr *hwmgr) { struct phm_ppt_v1_information *table_info = diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h index 55e5820..fec7724 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h @@ -36,6 +36,19 @@ enum fiji_pt_config_reg_type { #define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 
#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 +#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0 +#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6 +#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0 +#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6 +#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0 +#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6 +#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xe0000000 +#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d +#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xe0000000 +#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d +#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xe0000000 +#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d + struct fiji_pt_config_reg { uint32_t offset; uint32_t mask; @@ -58,7 +71,9 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr); int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr); +int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr); int fiji_enable_power_containment(struct pp_hwmgr *hwmgr); +int fiji_disable_power_containment(struct pp_hwmgr *hwmgr); int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); int fiji_power_control_set_level(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c index 7a705ce..a6abe81 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c @@ -59,8 +59,8 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr, struct phm_runtime_table_header *rt_table, void *input, void *output) { - int result = 0; - void *temp_storage = NULL; + int result; + void *temp_storage; if (hwmgr == NULL || rt_table == NULL) { printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n"); @@ -73,12 +73,13 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr, printk(KERN_ERR "[ powerplay ] Could not allocate table temporary storage\n"); return -ENOMEM; } + } else { + temp_storage = NULL; } result = phm_run_table(hwmgr, rt_table, input, output, temp_storage); - if (NULL != temp_storage) - kfree(temp_storage); + kfree(temp_storage); return result; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index efb77ed..789f98a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -154,6 +154,30 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr) return ret; } +int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr) +{ + int ret = -1; + bool enabled; + + PHM_FUNC_CHECK(hwmgr); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface)) { + if (hwmgr->hwmgr_func->dynamic_state_management_disable) + ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr); + } else { + ret = phm_dispatch_table(hwmgr, + &(hwmgr->disable_dynamic_state_management), + NULL, NULL); + } + + enabled = ret == 0 ? 
false : true; + + cgs_notify_dpm_enabled(hwmgr->device, enabled); + + return ret; +} + int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) { PHM_FUNC_CHECK(hwmgr); @@ -314,7 +338,7 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, if (hwmgr->hwmgr_func->store_cc6_data == NULL) return -EINVAL; - /* to do pass other display configuration in furture */ + /* TODO: pass other display configuration in the future */ if (hwmgr->hwmgr_func->store_cc6_data) hwmgr->hwmgr_func->store_cc6_data(hwmgr, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 20f20e0..27e0762 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -24,6 +24,7 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> +#include <drm/amdgpu_drm.h> #include "cgs_common.h" #include "power_state.h" #include "hwmgr.h" @@ -58,12 +59,13 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) hwmgr->hw_revision = pp_init->rev_id; hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; hwmgr->power_source = PP_PowerSource_AC; + hwmgr->powercontainment_enabled = pp_init->powercontainment_enabled; switch (hwmgr->chip_family) { - case AMD_FAMILY_CZ: + case AMDGPU_FAMILY_CZ: cz_hwmgr_init(hwmgr); break; - case AMD_FAMILY_VI: + case AMDGPU_FAMILY_VI: switch (hwmgr->chip_id) { case CHIP_TONGA: tonga_hwmgr_init(hwmgr); @@ -94,6 +96,8 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr) return -EINVAL; /* do hwmgr finish*/ + kfree(hwmgr->hardcode_pp_table); + kfree(hwmgr->backend); kfree(hwmgr->start_thermal_controller.function_list); @@ -530,7 +534,7 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr /* initialize vddc_dep_on_dal_pwrl table */ table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record); - table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL); + table_clk_vlt = kzalloc(table_size, GFP_KERNEL); if (NULL == table_clk_vlt) { printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! 
\n"); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c index 8f142a7..b5edb51 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c @@ -106,11 +106,17 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) data->uvd_power_gated = bgate; if (bgate) { + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_GATE); polaris10_update_uvd_dpm(hwmgr, true); polaris10_phm_powerdown_uvd(hwmgr); } else { polaris10_phm_powerup_uvd(hwmgr); polaris10_update_uvd_dpm(hwmgr, false); + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_UNGATE); } return 0; @@ -125,11 +131,19 @@ int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) data->vce_power_gated = bgate; - if (bgate) + if (bgate) { + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); + polaris10_update_vce_dpm(hwmgr, true); polaris10_phm_powerdown_vce(hwmgr); - else + } else { polaris10_phm_powerup_vce(hwmgr); - + polaris10_update_vce_dpm(hwmgr, false); + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_UNGATE); + } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c index 91e25f9..769636a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c @@ -389,6 +389,34 @@ static int polaris10_program_voting_clients(struct pp_hwmgr *hwmgr) return 0; } +static int polaris10_clear_voting_clients(struct pp_hwmgr *hwmgr) +{ + /* Reset voting clients before disabling DPM */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_0, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_1, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_2, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_3, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_4, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_5, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_6, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_7, 0); + + return 0; +} + /** * Get the location of various tables inside the FW image. 
* @@ -515,6 +543,11 @@ static int polaris10_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, return 0; } +static int polaris10_reset_to_default(struct pp_hwmgr *hwmgr) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults); +} + /** * Initial switch from ARB F0->F1 * @@ -528,6 +561,21 @@ static int polaris10_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr) MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); } +static int polaris10_force_switch_to_arbf0(struct pp_hwmgr *hwmgr) +{ + uint32_t tmp; + + tmp = (cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixSMC_SCRATCH9) & + 0x0000ff00) >> 8; + + if (tmp == MC_CG_ARB_FREQ_F0) + return 0; + + return polaris10_copy_and_switch_arb_sets(hwmgr, + tmp, MC_CG_ARB_FREQ_F0); +} + static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); @@ -1356,9 +1404,9 @@ static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) return result; } - /* in order to prevent MC activity from stutter mode to push DPM up. + /* In order to prevent MC activity from stutter mode to push DPM up, * the UVD change complements this by putting the MCLK in - * a higher state by default such that we are not effected by + * a higher state by default such that we are not affected by * up threshold or and MCLK DPM latency. */ levels[0].ActivityLevel = 0x1f; @@ -1425,7 +1473,7 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, /* Get MinVoltage and Frequency from DPM0, * already converted to SMC_UL */ - sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value; + sclk_frequency = data->vbios_boot_state.sclk_bootup_value; result = polaris10_get_dependency_volt_by_clk(hwmgr, table_info->vdd_dep_on_sclk, sclk_frequency, @@ -1461,8 +1509,7 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ - table->MemoryACPILevel.MclkFrequency = - data->dpm_table.mclk_table.dpm_levels[0].value; + table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value; result = polaris10_get_dependency_volt_by_clk(hwmgr, table_info->vdd_dep_on_mclk, table->MemoryACPILevel.MclkFrequency, @@ -1780,7 +1827,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) { uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min; struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0; + uint8_t i, stretch_amount, volt_offset = 0; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = @@ -1831,11 +1878,8 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? 
table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6; /* Populate CKS Lookup Table */ - if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) - stretch_amount2 = 0; - else if (stretch_amount == 3 || stretch_amount == 4) - stretch_amount2 = 1; - else { + if (stretch_amount != 1 && stretch_amount != 2 && stretch_amount != 3 && + stretch_amount != 4 && stretch_amount != 5) { phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ClockStretcher); PP_ASSERT_WITH_CODE(false, @@ -1890,9 +1934,8 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr, if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { config = VR_SVI2_PLANE_2; table->VRConfig |= (config << VRCONF_MVDD_SHIFT); - } else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { - config = VR_SMIO_PATTERN_2; - table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + + offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1); } else { config = VR_STATIC_VOLTAGE; table->VRConfig |= (config << VRCONF_MVDD_SHIFT); @@ -2262,6 +2305,17 @@ static int polaris10_enable_ulv(struct pp_hwmgr *hwmgr) return 0; } +static int polaris10_disable_ulv(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct polaris10_ulv_parm *ulv = &(data->ulv); + + if (ulv->ulv_supported) + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV); + + return 0; +} + static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, @@ -2282,6 +2336,21 @@ static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) return 0; } +static int polaris10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) { + if (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MASTER_DeepSleep_OFF)) { + PP_ASSERT_WITH_CODE(false, + "Attempt to disable Master Deep Sleep switch failed!", + return -1); + } + } + + return 0; +} + static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); @@ -2379,6 +2448,58 @@ static int polaris10_start_dpm(struct pp_hwmgr *hwmgr) return 0; } +static int polaris10_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + + /* disable SCLK dpm */ + if (!data->sclk_dpm_key_disabled) + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_DPM_Disable) == 0), + "Failed to disable SCLK DPM!", + return -1); + + /* disable MCLK dpm */ + if (!data->mclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_Disable) == 0), + "Failed to disable MCLK DPM!", + return -1); + } + + return 0; +} + +static int polaris10_stop_dpm(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + + /* disable general power management */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + GLOBAL_PWRMGT_EN, 0); + /* disable sclk deep sleep */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, + DYNAMIC_PM_EN, 0); + + /* disable PCIE dpm */ + if (!data->pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_Disable) == 0), + "Failed to disable 
pcie DPM during DPM Stop Function!", + return -1); + } + + if (polaris10_disable_sclk_mclk_dpm(hwmgr)) { + printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!"); + return -1; + } + + return 0; +} + static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources) { bool protection; @@ -2436,6 +2557,23 @@ static int polaris10_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); } +static int polaris10_disable_auto_throttle_source(struct pp_hwmgr *hwmgr, + PHM_AutoThrottleSource source) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + + if (data->active_auto_throttle_sources & (1 << source)) { + data->active_auto_throttle_sources &= ~(1 << source); + polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); + } + return 0; +} + +static int polaris10_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) +{ + return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); +} + int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); @@ -2530,6 +2668,10 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to enable deep sleep master switch!", result = tmp_result); + tmp_result = polaris10_enable_didt_config(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to enable deep sleep master switch!", result = tmp_result); + tmp_result = polaris10_start_dpm(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to start DPM!", result = tmp_result); @@ -2559,8 +2701,60 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) { + int tmp_result, result = 0; - return 0; + tmp_result = (polaris10_is_dpm_running(hwmgr)) ? 
0 : -1; + PP_ASSERT_WITH_CODE(tmp_result == 0, + "DPM is not running right now, no need to disable DPM!", + return 0); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController)) + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1); + + tmp_result = polaris10_disable_power_containment(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable power containment!", result = tmp_result); + + tmp_result = polaris10_disable_smc_cac(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable SMC CAC!", result = tmp_result); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_SPLL_SPREAD_SPECTRUM, SSEN, 0); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0); + + tmp_result = polaris10_disable_thermal_auto_throttle(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable thermal auto throttle!", result = tmp_result); + + tmp_result = polaris10_stop_dpm(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to stop DPM!", result = tmp_result); + + tmp_result = polaris10_disable_deep_sleep_master_switch(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable deep sleep master switch!", result = tmp_result); + + tmp_result = polaris10_disable_ulv(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable ULV!", result = tmp_result); + + tmp_result = polaris10_clear_voting_clients(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to clear voting clients!", result = tmp_result); + + tmp_result = polaris10_reset_to_default(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to reset to default!", result = tmp_result); + + tmp_result = polaris10_force_switch_to_arbf0(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to force to switch arbf0!", result = tmp_result); + + return result; } int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr) @@ -2571,13 +2765,6 @@ int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr) int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) { - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (data->soft_pp_table) { - kfree(data->soft_pp_table); - data->soft_pp_table = NULL; - } - return phm_hwmgr_backend_fini(hwmgr); } @@ -2624,17 +2811,22 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_DynamicUVDState); /* power tune caps Assume disabled */ - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); + if (hwmgr->powercontainment_enabled) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + else + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CAC); @@ -2706,12 +2898,12 @@ static int 
polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr) } } - - PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, - VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc), - "Error retrieving EVV voltage value!", - continue); - + if (atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, + VOLTAGE_TYPE_VDDC, + sclk, vv_id, &vddc) != 0) { + printk(KERN_WARNING "failed to retrieving EVV voltage!\n"); + continue; + } /* need to make sure vddc is less than 2v or else, it could burn the ASIC. * real voltage level in unit of 0.01mv */ @@ -2968,13 +3160,19 @@ int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr) int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct polaris10_hwmgr *data; struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; uint32_t temp_reg; int result; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + data = kzalloc(sizeof(struct polaris10_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + hwmgr->backend = data; + data->dll_default_on = false; data->sram_end = SMC_RAM_END; data->mclk_dpm0_activity_target = 0xa; @@ -3063,7 +3261,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) if (0 == result) { struct cgs_system_info sys_info = {0}; - data->is_tlu_enabled = 0; + data->is_tlu_enabled = false; hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = POLARIS10_MAX_HARDWARE_POWERLEVELS; @@ -3148,7 +3346,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; result = cgs_query_system_info(hwmgr->device, &sys_info); if (result) - data->pcie_gen_cap = 0x30007; + data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; else data->pcie_gen_cap = (uint32_t)sys_info.value; if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) @@ -3157,7 +3355,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; result = cgs_query_system_info(hwmgr->device, &sys_info); if (result) - data->pcie_lane_cap = 0x2f0000; + data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; else data->pcie_lane_cap = (uint32_t)sys_info.value; @@ -3446,6 +3644,7 @@ static int polaris10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); + disable_mclk_switching = (1 < info.display_count) || disable_mclk_switching_for_frame_lock; @@ -3950,8 +4149,8 @@ static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->sclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr), - "Trying to freeze SCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), + "Trying to freeze SCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_SCLKDPM_FreezeLevel), @@ -3962,8 +4161,8 @@ static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->mclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr), - "Trying to freeze MCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), + "Trying to freeze MCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_FreezeLevel), @@ -4123,7 
+4322,6 @@ static int polaris10_trim_single_dpm_states(struct pp_hwmgr *hwmgr, static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct polaris10_power_state *polaris10_ps) { - int result = 0; struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); uint32_t high_limit_count; @@ -4143,7 +4341,7 @@ static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr, polaris10_ps->performance_levels[0].memory_clock, polaris10_ps->performance_levels[high_limit_count].memory_clock); - return result; + return 0; } static int polaris10_generate_dpm_level_enable_mask( @@ -4226,25 +4424,20 @@ int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) return polaris10_enable_disable_uvd_dpm(hwmgr, !bgate); } -static int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input) +int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate) { - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - const struct polaris10_power_state *polaris10_nps = - cast_const_phw_polaris10_power_state(states->pnew_state); - const struct polaris10_power_state *polaris10_cps = - cast_const_phw_polaris10_power_state(states->pcurrent_state); - uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - if (polaris10_nps->vce_clks.evclk > 0 && - (polaris10_cps == NULL || polaris10_cps->vce_clks.evclk == 0)) { - - data->smc_state_table.VceBootLevel = + if (!bgate) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + data->smc_state_table.VceBootLevel = (uint8_t) (table_info->mm_dep_table->count - 1); + else + data->smc_state_table.VceBootLevel = 0; mm_boot_level_offset = data->dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VceBootLevel); @@ -4257,18 +4450,14 @@ static int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input) cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_VCEDPM_SetEnabledMask, (uint32_t)1 << data->smc_state_table.VceBootLevel); - - polaris10_enable_disable_vce_dpm(hwmgr, true); - } else if (polaris10_nps->vce_clks.evclk == 0 && - polaris10_cps != NULL && - polaris10_cps->vce_clks.evclk > 0) - polaris10_enable_disable_vce_dpm(hwmgr, false); } + polaris10_enable_disable_vce_dpm(hwmgr, !bgate); + return 0; } @@ -4353,8 +4542,8 @@ static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr), - "Trying to Unfreeze SCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), + "Trying to Unfreeze SCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel), @@ -4365,8 +4554,8 @@ static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->mclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr), - "Trying to Unfreeze MCLK DPM 
when DPM is disabled", + PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), + "Trying to Unfreeze MCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel), @@ -4422,6 +4611,8 @@ static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr) return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; } + + static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) { int tmp_result, result = 0; @@ -4455,11 +4646,6 @@ static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *i "Failed to generate DPM level enabled mask!", result = tmp_result); - tmp_result = polaris10_update_vce_dpm(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to update VCE DPM!", - result = tmp_result); - tmp_result = polaris10_update_sclk_threshold(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", @@ -4530,6 +4716,7 @@ int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwm if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */ polaris10_notify_smc_display_change(hwmgr, false); + return 0; } @@ -4579,6 +4766,7 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr) cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); + return 0; } @@ -4820,42 +5008,6 @@ int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr) return result; } -static int polaris10_get_pp_table(struct pp_hwmgr *hwmgr, char **table) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (!data->soft_pp_table) { - data->soft_pp_table = kmemdup(hwmgr->soft_pp_table, - hwmgr->soft_pp_table_size, - GFP_KERNEL); - if (!data->soft_pp_table) - return -ENOMEM; - } - - *table = (char *)&data->soft_pp_table; - - return hwmgr->soft_pp_table_size; -} - -static int polaris10_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (!data->soft_pp_table) { - data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); - if (!data->soft_pp_table) - return -ENOMEM; - } - - memcpy(data->soft_pp_table, buf, size); - - hwmgr->soft_pp_table = data->soft_pp_table; - - /* TODO: re-init powerplay to implement modified pptable */ - - return 0; -} - static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask) { @@ -4998,6 +5150,89 @@ static int polaris10_get_fan_control_mode(struct pp_hwmgr *hwmgr) CG_FDO_CTRL2, FDO_PWM_MODE); } +static int polaris10_get_sclk_od(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct polaris10_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + int value; + + value = (sclk_table->dpm_levels[sclk_table->count - 1].value - + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * + 100 / + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return value; +} + +static int polaris10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct polaris10_single_dpm_table *golden_sclk_table = + 
&(data->golden_dpm_table.sclk_table); + struct pp_power_state *ps; + struct polaris10_power_state *polaris10_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware); + + polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].engine_clock = + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * + value / 100 + + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return 0; +} + +static int polaris10_get_mclk_od(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct polaris10_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + int value; + + value = (mclk_table->dpm_levels[mclk_table->count - 1].value - + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * + 100 / + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return value; +} + +static int polaris10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct polaris10_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + struct pp_power_state *ps; + struct polaris10_power_state *polaris10_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware); + + polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].memory_clock = + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * + value / 100 + + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return 0; +} static const struct pp_hwmgr_func polaris10_hwmgr_funcs = { .backend_init = &polaris10_hwmgr_backend_init, .backend_fini = &polaris10_hwmgr_backend_fini, @@ -5036,22 +5271,17 @@ static const struct pp_hwmgr_func polaris10_hwmgr_funcs = { .check_states_equal = polaris10_check_states_equal, .set_fan_control_mode = polaris10_set_fan_control_mode, .get_fan_control_mode = polaris10_get_fan_control_mode, - .get_pp_table = polaris10_get_pp_table, - .set_pp_table = polaris10_set_pp_table, .force_clock_level = polaris10_force_clock_level, .print_clock_levels = polaris10_print_clock_levels, .enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating, + .get_sclk_od = polaris10_get_sclk_od, + .set_sclk_od = polaris10_set_sclk_od, + .get_mclk_od = polaris10_get_mclk_od, + .set_mclk_od = polaris10_set_mclk_od, }; int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr) { - struct polaris10_hwmgr *data; - - data = kzalloc (sizeof(struct polaris10_hwmgr), GFP_KERNEL); - if (data == NULL) - return -ENOMEM; - - hwmgr->backend = data; hwmgr->hwmgr_func = &polaris10_hwmgr_funcs; hwmgr->pptable_func = &tonga_pptable_funcs; pp_polaris10_thermal_initialize(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h index afc3434..33c3394 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h @@ -309,10 +309,6 @@ struct polaris10_hwmgr { uint32_t up_hyst; uint32_t disable_dpm_mask; bool apply_optimized_settings; - - /* soft pptable for re-uploading into smu */ - void *soft_pp_table; - uint32_t avfs_vdroop_override_setting; bool 
apply_avfs_cks_off_voltage; uint32_t frame_time_x2; @@ -356,6 +352,6 @@ int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr); int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate); int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate); int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); - +int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c index ae96f14..b9cb240 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c @@ -28,10 +28,360 @@ #include "polaris10_smumgr.h" #include "smu74_discrete.h" #include "pp_debug.h" +#include "gca/gfx_8_0_d.h" +#include "gca/gfx_8_0_sh_mask.h" +#include "oss/oss_3_0_sh_mask.h" #define VOLTAGE_SCALE 4 #define POWERTUNE_DEFAULT_SET_MAX 1 +uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK; + +struct polaris10_pt_config_reg GCCACConfig_Polaris10[] = { +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value Type + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, POLARIS10_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, POLARIS10_CONFIGREG_GC_CAC_IND }, + + { 0xFFFFFFFF } +}; + +struct polaris10_pt_config_reg GCCACConfig_Polaris11[] = { +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value Type + * 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060011, POLARIS10_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900011, POLARIS10_CONFIGREG_GC_CAC_IND }, + + { 0xFFFFFFFF } +}; + +struct polaris10_pt_config_reg DIDTConfig_Polaris10[] = { +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value Type + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, 
DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, 
DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, 
DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, 
DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, 
DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { 0xFFFFFFFF } +}; + +struct polaris10_pt_config_reg DIDTConfig_Polaris11[] = { +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value Type + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + 
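/*
 * How the rows in these DIDTConfig tables are applied (an illustrative summary; the
 * authoritative logic is polaris10_program_pt_config_registers(), added further down
 * in this patch): each POLARIS10_CONFIGREG_DIDT_IND row performs a read-modify-write
 * of one field in the DIDT indirect register space, roughly:
 *
 *	data  = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, reg->offset);
 *	data &= ~reg->mask;
 *	data |= (reg->value << reg->shift) & reg->mask;
 *	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, reg->offset, data);
 *
 * POLARIS10_CONFIGREG_CACHE rows are instead accumulated into a local cache that is
 * OR-ed into the next non-cache write, and each table is terminated by the
 * { 0xFFFFFFFF } sentinel row.
 */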
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, 
DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, 
DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND 
}, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { 0xFFFFFFFF } +}; + static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */ @@ -209,6 +559,187 @@ static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) return 0; } +static int polaris10_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) +{ + + uint32_t en = enable ? 
1 : 0; + int32_t result = 0; + uint32_t data; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0); + data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data); + DIDTBlock_Info &= ~SQ_Enable_MASK; + DIDTBlock_Info |= en << SQ_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0); + data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data); + DIDTBlock_Info &= ~DB_Enable_MASK; + DIDTBlock_Info |= en << DB_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0); + data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data); + DIDTBlock_Info &= ~TD_Enable_MASK; + DIDTBlock_Info |= en << TD_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0); + data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data); + DIDTBlock_Info &= ~TCP_Enable_MASK; + DIDTBlock_Info |= en << TCP_Enable_SHIFT; + } + + if (enable) + result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, DIDTBlock_Info); + + return result; +} + +static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr, + struct polaris10_pt_config_reg *cac_config_regs) +{ + struct polaris10_pt_config_reg *config_regs = cac_config_regs; + uint32_t cache = 0; + uint32_t data = 0; + + PP_ASSERT_WITH_CODE((config_regs != NULL), "Invalid config register table.", return -EINVAL); + + while (config_regs->offset != 0xFFFFFFFF) { + if (config_regs->type == POLARIS10_CONFIGREG_CACHE) + cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); + else { + switch (config_regs->type) { + case POLARIS10_CONFIGREG_SMC_IND: + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset); + break; + + case POLARIS10_CONFIGREG_DIDT_IND: + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset); + break; + + case POLARIS10_CONFIGREG_GC_CAC_IND: + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset); + break; + + default: + data = cgs_read_register(hwmgr->device, config_regs->offset); + break; + } + + data &= ~config_regs->mask; + data |= ((config_regs->value << config_regs->shift) & config_regs->mask); + data |= cache; + + switch (config_regs->type) { + case POLARIS10_CONFIGREG_SMC_IND: + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset, data); + break; + + case POLARIS10_CONFIGREG_DIDT_IND: + cgs_write_ind_register(hwmgr->device, 
CGS_IND_REG__DIDT, config_regs->offset, data); + break; + + case POLARIS10_CONFIGREG_GC_CAC_IND: + cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data); + break; + + default: + cgs_write_register(hwmgr->device, config_regs->offset, data); + break; + } + cache = 0; + } + + config_regs++; + } + + return 0; +} + +int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr) +{ + int result; + uint32_t num_se = 0; + uint32_t count, value, value2; + struct cgs_system_info sys_info = {0}; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO; + result = cgs_query_system_info(hwmgr->device, &sys_info); + + + if (result == 0) + num_se = sys_info.value; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + + /* TO DO Pre DIDT disable clock gating */ + value = 0; + value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX); + for (count = 0; count < num_se; count++) { + value = SYS_GRBM_GFX_INDEX_DATA__INSTANCE_BROADCAST_WRITES_MASK + | SYS_GRBM_GFX_INDEX_DATA__SH_BROADCAST_WRITES_MASK + | (count << SYS_GRBM_GFX_INDEX_DATA__SE_INDEX__SHIFT); + cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value); + + if (hwmgr->chip_id == CHIP_POLARIS10) { + result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + } else if (hwmgr->chip_id == CHIP_POLARIS11) { + result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + } + } + cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2); + + result = polaris10_enable_didt(hwmgr, true); + PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result); + + /* TO DO Post DIDT enable clock gating */ + } + + return 0; +} + +int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr) +{ + int result; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + /* TO DO Pre DIDT disable clock gating */ + + result = polaris10_enable_didt(hwmgr, false); + PP_ASSERT_WITH_CODE((result == 0), "Post DIDT enable clock gating failed.", return result); + /* TO DO Post DIDT enable clock gating */ + } + + return 0; +} + + static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); @@ -312,6 +843,23 @@ int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr) return result; } +int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr 
*)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC) && data->cac_enabled) { + int smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_DisableCac)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable CAC in SMC.", result = -1); + + data->cac_enabled = false; + } + return result; +} + int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); @@ -373,6 +921,48 @@ int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr) return result; } +int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment) && + data->power_containment_features) { + int smc_result; + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_TDCLimit) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_TDCLimitDisable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable TDCLimit in SMC.", + result = smc_result); + } + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_DTE) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_DisableDTE)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable DTE in SMC.", + result = smc_result); + } + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_PkgPwrLimit) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable PkgPwrTracking in SMC.", + result = smc_result); + } + data->power_containment_features = 0; + } + + return result; +} + int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr) { struct phm_ppt_v1_information *table_info = diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h index 68bc1cb..bc78e28 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h @@ -27,15 +27,37 @@ enum polaris10_pt_config_reg_type { POLARIS10_CONFIGREG_MMR = 0, POLARIS10_CONFIGREG_SMC_IND, POLARIS10_CONFIGREG_DIDT_IND, + POLARIS10_CONFIGREG_GC_CAC_IND, POLARIS10_CONFIGREG_CACHE, POLARIS10_CONFIGREG_MAX }; +#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xfffc0000 +#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x12 +#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xfffc0000 +#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x12 +#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xfffc0000 +#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x12 +#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xc0000000 +#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e +#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xc0000000 +#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e +#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xc0000000 +#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e + /* PowerContainment Features */ #define POWERCONTAINMENT_FEATURE_DTE 0x00000001 #define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 #define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 +#define ixGC_CAC_CNTL 0x0000 +#define ixDIDT_SQ_STALL_CTRL 0x0004 +#define ixDIDT_SQ_TUNING_CTRL 0x0005 +#define ixDIDT_TD_STALL_CTRL 0x0044 +#define ixDIDT_TD_TUNING_CTRL 0x0045 +#define ixDIDT_TCP_STALL_CTRL 0x0064 +#define 
ixDIDT_TCP_TUNING_CTRL 0x0065 + struct polaris10_pt_config_reg { uint32_t offset; uint32_t mask; @@ -62,9 +84,11 @@ void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr); int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr); +int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr); int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr); +int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr); int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr); - +int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr); #endif /* POLARIS10_POWERTUNE_H */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c index a3c38bb..1944d28 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c @@ -66,7 +66,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise) int result; struct cgs_system_info info = {0}; - if( 0 != acpi_atcs_notify_pcie_device_ready(device)) + if (acpi_atcs_notify_pcie_device_ready(device)) return -EINVAL; info.size = sizeof(struct cgs_system_info); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index 90b35c5..26f3e30 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -179,13 +179,12 @@ int atomctrl_set_engine_dram_timings_rv770( /* They are both in 10KHz Units. */ engine_clock_parameters.ulTargetEngineClock = - (uint32_t) engine_clock & SET_CLOCK_FREQ_MASK; - engine_clock_parameters.ulTargetEngineClock |= - (COMPUTE_ENGINE_PLL_PARAM << 24); + cpu_to_le32((engine_clock & SET_CLOCK_FREQ_MASK) | + ((COMPUTE_ENGINE_PLL_PARAM << 24))); /* in 10 khz units.*/ engine_clock_parameters.sReserved.ulClock = - (uint32_t) memory_clock & SET_CLOCK_FREQ_MASK; + cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK); return cgs_atom_exec_cmd_table(hwmgr->device, GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), &engine_clock_parameters); @@ -252,7 +251,7 @@ int atomctrl_get_memory_pll_dividers_si( COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters; int result; - mpll_parameters.ulClock = (uint32_t) clock_value; + mpll_parameters.ulClock = cpu_to_le32(clock_value); mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 
1 : 0); result = cgs_atom_exec_cmd_table @@ -262,9 +261,9 @@ int atomctrl_get_memory_pll_dividers_si( if (0 == result) { mpll_param->mpll_fb_divider.clk_frac = - mpll_parameters.ulFbDiv.usFbDivFrac; + le16_to_cpu(mpll_parameters.ulFbDiv.usFbDivFrac); mpll_param->mpll_fb_divider.cl_kf = - mpll_parameters.ulFbDiv.usFbDiv; + le16_to_cpu(mpll_parameters.ulFbDiv.usFbDiv); mpll_param->mpll_post_divider = (uint32_t)mpll_parameters.ucPostDiv; mpll_param->vco_mode = @@ -300,7 +299,7 @@ int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr, COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters; int result; - mpll_parameters.ulClock.ulClock = (uint32_t)clock_value; + mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value); result = cgs_atom_exec_cmd_table(hwmgr->device, GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), @@ -320,7 +319,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters; int result; - pll_parameters.ulClock = clock_value; + pll_parameters.ulClock = cpu_to_le32(clock_value); result = cgs_atom_exec_cmd_table (hwmgr->device, @@ -329,7 +328,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, if (0 == result) { dividers->pll_post_divider = pll_parameters.ucPostDiv; - dividers->real_clock = pll_parameters.ulClock; + dividers->real_clock = le32_to_cpu(pll_parameters.ulClock); } return result; @@ -343,7 +342,7 @@ int atomctrl_get_engine_pll_dividers_vi( COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; int result; - pll_patameters.ulClock.ulClock = clock_value; + pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; result = cgs_atom_exec_cmd_table @@ -355,12 +354,12 @@ int atomctrl_get_engine_pll_dividers_vi( dividers->pll_post_divider = pll_patameters.ulClock.ucPostDiv; dividers->real_clock = - pll_patameters.ulClock.ulClock; + le32_to_cpu(pll_patameters.ulClock.ulClock); dividers->ul_fb_div.ul_fb_div_frac = - pll_patameters.ulFbDiv.usFbDivFrac; + le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac); dividers->ul_fb_div.ul_fb_div = - pll_patameters.ulFbDiv.usFbDiv; + le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv); dividers->uc_pll_ref_div = pll_patameters.ucPllRefDiv; @@ -380,7 +379,7 @@ int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters; int result; - pll_patameters.ulClock.ulClock = clock_value; + pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; result = cgs_atom_exec_cmd_table @@ -412,7 +411,7 @@ int atomctrl_get_dfs_pll_dividers_vi( COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; int result; - pll_patameters.ulClock.ulClock = clock_value; + pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK; @@ -425,12 +424,12 @@ int atomctrl_get_dfs_pll_dividers_vi( dividers->pll_post_divider = pll_patameters.ulClock.ucPostDiv; dividers->real_clock = - pll_patameters.ulClock.ulClock; + le32_to_cpu(pll_patameters.ulClock.ulClock); dividers->ul_fb_div.ul_fb_div_frac = - pll_patameters.ulFbDiv.usFbDivFrac; + le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac); dividers->ul_fb_div.ul_fb_div = - pll_patameters.ulFbDiv.usFbDiv; + le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv); dividers->uc_pll_ref_div = pll_patameters.ucPllRefDiv; @@ -519,13 +518,13 @@ int atomctrl_get_voltage_table_v3( for (i = 
0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) { voltage_table->entries[i].value = - voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue; + le16_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue); voltage_table->entries[i].smio_low = - voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId; + le32_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId); } voltage_table->mask_low = - voltage_object->asGpioVoltageObj.ulGpioMaskVal; + le32_to_cpu(voltage_object->asGpioVoltageObj.ulGpioMaskVal); voltage_table->count = voltage_object->asGpioVoltageObj.ucGpioEntryNum; voltage_table->phase_delay = @@ -552,13 +551,13 @@ static bool atomctrl_lookup_gpio_pin( pin_assignment->ucGpioPinBitShift; gpio_pin_assignment->us_gpio_pin_aindex = le16_to_cpu(pin_assignment->usGpioPin_AIndex); - return false; + return true; } offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1; } - return true; + return false; } /** @@ -592,12 +591,12 @@ bool atomctrl_get_pp_assign_pin( const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment) { - bool bRet = 0; + bool bRet = false; ATOM_GPIO_PIN_LUT *gpio_lookup_table = get_gpio_lookup_table(hwmgr->device); PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table), - "Could not find GPIO lookup Table in BIOS.", return -1); + "Could not find GPIO lookup Table in BIOS.", return false); bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId, gpio_pin_assignment); @@ -650,8 +649,8 @@ int atomctrl_calculate_voltage_evv_on_sclk( return -1; if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 || - (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 && - getASICProfilingInfo->asHeader.ucTableContentRevision < 4)) + (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 && + getASICProfilingInfo->asHeader.ucTableContentRevision < 4)) return -1; /*----------------------------------------------------------- @@ -662,37 +661,37 @@ int atomctrl_calculate_voltage_evv_on_sclk( switch (dpm_level) { case 1: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm1); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM1, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm1)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000); break; case 2: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm2); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM2, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm2)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000); break; case 3: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm3); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM3, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm3)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000); break; case 4: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm4); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM4, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm4)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000); break; case 5: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm5); - fDerateTDP = 
GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM5, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm5)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000); break; case 6: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm6); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM6, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm6)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000); break; case 7: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm7); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM7, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm7)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000); break; default: printk(KERN_ERR "DPM Level not supported\n"); fPowerDPMx = Convert_ULONG_ToFraction(1); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM0, 1000); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000); } /*------------------------- @@ -716,9 +715,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( return result; /* Finally, the actual fuse value */ - ul_RO_fused = sOutput_FuseValues.ulEfuseValue; - fMin = GetScaledFraction(sRO_fuse.ulEfuseMin, 1); - fRange = GetScaledFraction(sRO_fuse.ulEfuseEncodeRange, 1); + ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1); + fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1); fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength); sCACm_fuse = getASICProfilingInfo->sCACm; @@ -736,9 +735,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( if (result) return result; - ul_CACm_fused = sOutput_FuseValues.ulEfuseValue; - fMin = GetScaledFraction(sCACm_fuse.ulEfuseMin, 1000); - fRange = GetScaledFraction(sCACm_fuse.ulEfuseEncodeRange, 1000); + ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000); + fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000); fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength); @@ -756,9 +755,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( if (result) return result; - ul_CACb_fused = sOutput_FuseValues.ulEfuseValue; - fMin = GetScaledFraction(sCACb_fuse.ulEfuseMin, 1000); - fRange = GetScaledFraction(sCACb_fuse.ulEfuseEncodeRange, 1000); + ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000); + fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000); fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength); @@ -777,9 +776,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( if (result) return result; - ul_Kt_Beta_fused = sOutput_FuseValues.ulEfuseValue; - fAverage = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeAverage, 1000); - fRange = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeRange, 1000); + ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000); + fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000); fKt_Beta_fused = 
fDecodeLogisticFuse(ul_Kt_Beta_fused, fAverage, fRange, sKt_Beta_fuse.ucEfuseLength); @@ -798,9 +797,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( if (result) return result; - ul_Kv_m_fused = sOutput_FuseValues.ulEfuseValue; - fAverage = GetScaledFraction(sKv_m_fuse.ulEfuseEncodeAverage, 1000); - fRange = GetScaledFraction((sKv_m_fuse.ulEfuseEncodeRange & 0x7fffffff), 1000); + ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000); + fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000); fRange = fMultiply(fRange, ConvertToFraction(-1)); fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused, @@ -820,9 +819,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( if (result) return result; - ul_Kv_b_fused = sOutput_FuseValues.ulEfuseValue; - fAverage = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeAverage, 1000); - fRange = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeRange, 1000); + ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000); + fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000); fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused, fAverage, fRange, sKv_b_fuse.ucEfuseLength); @@ -851,9 +850,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( if (result) return result; - ul_FT_Lkg_V0NORM = sOutput_FuseValues.ulEfuseValue; - fLn_MaxDivMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin, 10000); - fMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeMin, 10000); + ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000); + fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000); fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM, fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength); @@ -863,40 +862,40 @@ int atomctrl_calculate_voltage_evv_on_sclk( * PART 2 - Grabbing all required values *------------------------------------------- */ - fSM_A0 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A0, 1000000), + fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign))); - fSM_A1 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A1, 1000000), + fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign))); - fSM_A2 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A2, 100000), + fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign))); - fSM_A3 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A3, 1000000), + fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign))); - fSM_A4 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A4, 1000000), + fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign))); - fSM_A5 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A5, 1000), + fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000), ConvertToFraction(uPow(-1, 
getASICProfilingInfo->ucSM_A5_sign))); - fSM_A6 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A6, 1000), + fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign))); - fSM_A7 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A7, 1000), + fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign))); - fMargin_RO_a = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_a); - fMargin_RO_b = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_b); - fMargin_RO_c = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_c); + fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a)); + fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b)); + fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c)); - fMargin_fixed = ConvertToFraction(getASICProfilingInfo->ulMargin_fixed); + fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed)); fMargin_FMAX_mean = GetScaledFraction( - getASICProfilingInfo->ulMargin_Fmax_mean, 10000); + le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000); fMargin_Plat_mean = GetScaledFraction( - getASICProfilingInfo->ulMargin_plat_mean, 10000); + le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000); fMargin_FMAX_sigma = GetScaledFraction( - getASICProfilingInfo->ulMargin_Fmax_sigma, 10000); + le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000); fMargin_Plat_sigma = GetScaledFraction( - getASICProfilingInfo->ulMargin_plat_sigma, 10000); + le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000); fMargin_DC_sigma = GetScaledFraction( - getASICProfilingInfo->ulMargin_DC_sigma, 100); + le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100); fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000)); fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100)); @@ -908,14 +907,14 @@ int atomctrl_calculate_voltage_evv_on_sclk( fSclk = GetScaledFraction(sclk, 100); fV_max = fDivide(GetScaledFraction( - getASICProfilingInfo->ulMaxVddc, 1000), ConvertToFraction(4)); - fT_prod = GetScaledFraction(getASICProfilingInfo->ulBoardCoreTemp, 10); - fLKG_Factor = GetScaledFraction(getASICProfilingInfo->ulEvvLkgFactor, 100); - fT_FT = GetScaledFraction(getASICProfilingInfo->ulLeakageTemp, 10); + le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4)); + fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10); + fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100); + fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10); fV_FT = fDivide(GetScaledFraction( - getASICProfilingInfo->ulLeakageVoltage, 1000), ConvertToFraction(4)); + le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4)); fV_min = fDivide(GetScaledFraction( - getASICProfilingInfo->ulMinVddc, 1000), ConvertToFraction(4)); + le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4)); /*----------------------- * PART 3 @@ -925,7 +924,7 @@ int atomctrl_calculate_voltage_evv_on_sclk( fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5)); fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b); fC_Term = fAdd(fMargin_RO_c, - fAdd(fMultiply(fSM_A0,fLkg_FT), + fAdd(fMultiply(fSM_A0, fLkg_FT), fAdd(fMultiply(fSM_A1, 
fMultiply(fLkg_FT, fSclk)), fAdd(fMultiply(fSM_A3, fSclk), fSubtract(fSM_A7, fRO_fused))))); @@ -1063,9 +1062,55 @@ int atomctrl_get_voltage_evv_on_sclk( get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; get_voltage_info_param_space.usVoltageLevel = - virtual_voltage_Id; + cpu_to_le16(virtual_voltage_Id); get_voltage_info_param_space.ulSCLKFreq = - sclk; + cpu_to_le32(sclk); + + result = cgs_atom_exec_cmd_table(hwmgr->device, + GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), + &get_voltage_info_param_space); + + if (0 != result) + return result; + + *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) + (&get_voltage_info_param_space))->usVoltageLevel); + + return result; +} + +/** + * atomctrl_get_voltage_evv gets voltage via call to ATOM COMMAND table. + * @param hwmgr input: pointer to hwManager + * @param virtual_voltage_id input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08 + * @param voltage output: real voltage level in unit of mv + */ +int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, + uint16_t virtual_voltage_id, + uint16_t *voltage) +{ + int result; + int entry_id; + GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space; + + /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */ + for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) { + if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].v == virtual_voltage_id) { + /* found */ + break; + } + } + + PP_ASSERT_WITH_CODE(entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count, + "Can't find requested voltage id in vddc_dependency_on_sclk table!", + return -EINVAL; + ); + + get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC; + get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; + get_voltage_info_param_space.usVoltageLevel = virtual_voltage_id; + get_voltage_info_param_space.ulSCLKFreq = + cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk); result = cgs_atom_exec_cmd_table(hwmgr->device, GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), @@ -1074,8 +1119,8 @@ int atomctrl_get_voltage_evv_on_sclk( if (0 != result) return result; - *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) - (&get_voltage_info_param_space))->usVoltageLevel; + *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) + (&get_voltage_info_param_space))->usVoltageLevel); return result; } @@ -1165,8 +1210,8 @@ static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr, if (entry_found) { ssEntry->speed_spectrum_percentage = - ssInfo->usSpreadSpectrumPercentage; - ssEntry->speed_spectrum_rate = ssInfo->usSpreadRateInKhz; + le16_to_cpu(ssInfo->usSpreadSpectrumPercentage); + ssEntry->speed_spectrum_rate = le16_to_cpu(ssInfo->usSpreadRateInKhz); if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) && (GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) || @@ -1222,7 +1267,7 @@ int atomctrl_read_efuse(void *device, uint16_t start_index, int result; READ_EFUSE_VALUE_PARAMETER efuse_param; - efuse_param.sEfuse.usEfuseIndex = (start_index / 32) * 4; + efuse_param.sEfuse.usEfuseIndex = cpu_to_le16((start_index / 32) * 4); efuse_param.sEfuse.ucBitShift = (uint8_t) (start_index - ((start_index / 32) * 32)); efuse_param.sEfuse.ucBitLength = (uint8_t) @@ -1232,19 +1277,21 @@ int atomctrl_read_efuse(void *device, uint16_t start_index, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), &efuse_param); if (!result) - *efuse = efuse_param.ulEfuseValue & mask; + 
*efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask; return result; } int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, - uint8_t level) + uint8_t level) { DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters; int result; - memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq = memory_clock & SET_CLOCK_FREQ_MASK; - memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag = ADJUST_MC_SETTING_PARAM; + memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq = + memory_clock & SET_CLOCK_FREQ_MASK; + memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag = + ADJUST_MC_SETTING_PARAM; memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level; result = cgs_atom_exec_cmd_table @@ -1264,8 +1311,8 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_ get_voltage_info_param_space.ucVoltageType = voltage_type; get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; - get_voltage_info_param_space.usVoltageLevel = virtual_voltage_Id; - get_voltage_info_param_space.ulSCLKFreq = sclk; + get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id); + get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk); result = cgs_atom_exec_cmd_table(hwmgr->device, GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), @@ -1274,7 +1321,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_ if (0 != result) return result; - *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel; + *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel); return result; } @@ -1295,15 +1342,19 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr for (i = 0; i < psmu_info->ucSclkEntryNum; i++) { table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting; table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv; - table->entry[i].usFcw_pcc = psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc; - table->entry[i].usFcw_trans_upper = psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper; - table->entry[i].usRcw_trans_lower = psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower; + table->entry[i].usFcw_pcc = + le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc); + table->entry[i].usFcw_trans_upper = + le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper); + table->entry[i].usRcw_trans_lower = + le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower); } return 0; } -int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param) +int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, + struct pp_atom_ctrl__avfs_parameters *param) { ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL; @@ -1317,30 +1368,30 @@ int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__a if (!profile) return -1; - param->ulAVFS_meanNsigma_Acontant0 = profile->ulAVFS_meanNsigma_Acontant0; - param->ulAVFS_meanNsigma_Acontant1 = profile->ulAVFS_meanNsigma_Acontant1; - param->ulAVFS_meanNsigma_Acontant2 = profile->ulAVFS_meanNsigma_Acontant2; - param->usAVFS_meanNsigma_DC_tol_sigma = profile->usAVFS_meanNsigma_DC_tol_sigma; - param->usAVFS_meanNsigma_Platform_mean = profile->usAVFS_meanNsigma_Platform_mean; - param->usAVFS_meanNsigma_Platform_sigma = profile->usAVFS_meanNsigma_Platform_sigma; - param->ulGB_VDROOP_TABLE_CKSOFF_a0 = profile->ulGB_VDROOP_TABLE_CKSOFF_a0; - 
param->ulGB_VDROOP_TABLE_CKSOFF_a1 = profile->ulGB_VDROOP_TABLE_CKSOFF_a1; - param->ulGB_VDROOP_TABLE_CKSOFF_a2 = profile->ulGB_VDROOP_TABLE_CKSOFF_a2; - param->ulGB_VDROOP_TABLE_CKSON_a0 = profile->ulGB_VDROOP_TABLE_CKSON_a0; - param->ulGB_VDROOP_TABLE_CKSON_a1 = profile->ulGB_VDROOP_TABLE_CKSON_a1; - param->ulGB_VDROOP_TABLE_CKSON_a2 = profile->ulGB_VDROOP_TABLE_CKSON_a2; - param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1; - param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2; - param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b; - param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSON_m1; - param->usAVFSGB_FUSE_TABLE_CKSON_m2 = profile->usAVFSGB_FUSE_TABLE_CKSON_m2; - param->ulAVFSGB_FUSE_TABLE_CKSON_b = profile->ulAVFSGB_FUSE_TABLE_CKSON_b; - param->usMaxVoltage_0_25mv = profile->usMaxVoltage_0_25mv; + param->ulAVFS_meanNsigma_Acontant0 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant0); + param->ulAVFS_meanNsigma_Acontant1 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant1); + param->ulAVFS_meanNsigma_Acontant2 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant2); + param->usAVFS_meanNsigma_DC_tol_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_DC_tol_sigma); + param->usAVFS_meanNsigma_Platform_mean = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_mean); + param->usAVFS_meanNsigma_Platform_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_sigma); + param->ulGB_VDROOP_TABLE_CKSOFF_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a0); + param->ulGB_VDROOP_TABLE_CKSOFF_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a1); + param->ulGB_VDROOP_TABLE_CKSOFF_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a2); + param->ulGB_VDROOP_TABLE_CKSON_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a0); + param->ulGB_VDROOP_TABLE_CKSON_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a1); + param->ulGB_VDROOP_TABLE_CKSON_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a2); + param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1); + param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2); + param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b); + param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_m1); + param->usAVFSGB_FUSE_TABLE_CKSON_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSON_m2); + param->ulAVFSGB_FUSE_TABLE_CKSON_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_b); + param->usMaxVoltage_0_25mv = le16_to_cpu(profile->usMaxVoltage_0_25mv); param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF; param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON; param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF; param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON; - param->usPSM_Age_ComFactor = profile->usPSM_Age_ComFactor; + param->usPSM_Age_ComFactor = le16_to_cpu(profile->usPSM_Age_ComFactor); param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage; return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h index 1e35a96..fc898af 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h @@ -281,6 +281,7 @@ struct pp_atom_ctrl__avfs_parameters { extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr 
*hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment); extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); +extern int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, uint16_t virtual_voltage_id, uint16_t *voltage); extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr); extern int atomctrl_get_memory_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t memory_clock, pp_atomctrl_internal_ss_info *ssInfo); extern int atomctrl_get_engine_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t engine_clock, pp_atomctrl_internal_ss_info *ssInfo); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h index 009bd59..8f50a03 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h @@ -50,55 +50,45 @@ typedef union _fInt { * Function Declarations * ------------------------------------------------------------------------------- */ -fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */ -fInt Convert_ULONG_ToFraction(uint32_t); /* Use this to convert an uint32_t to a FINT */ -fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT after scaling it by a factor */ -int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. last 3 digits are the decimal digits) */ - -fInt fNegate(fInt); /* Returns -1 * input fInt value */ -fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */ -fInt fSubtract (fInt A, fInt B); /* Returns A-B - Sometimes easier than Adding negative numbers */ -fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */ -fInt fDivide (fInt A, fInt B); /* Returns A/B */ -fInt fGetSquare(fInt); /* Returns the square of a fInt number */ -fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */ - -int uAbs(int); /* Returns the Absolute value of the Int */ -fInt fAbs(fInt); /* Returns the Absolute value of the fInt */ -int uPow(int base, int exponent); /* Returns base^exponent an INT */ - -void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */ -bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */ -bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */ - -fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */ -fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */ +static fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */ +static fInt Convert_ULONG_ToFraction(uint32_t); /* Use this to convert an uint32_t to a FINT */ +static fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT after scaling it by a factor */ +static int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. 
last 3 digits are the decimal digits) */ + +static fInt fNegate(fInt); /* Returns -1 * input fInt value */ +static fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */ +static fInt fSubtract (fInt A, fInt B); /* Returns A-B - Sometimes easier than Adding negative numbers */ +static fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */ +static fInt fDivide (fInt A, fInt B); /* Returns A/B */ +static fInt fGetSquare(fInt); /* Returns the square of a fInt number */ +static fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */ + +static int uAbs(int); /* Returns the Absolute value of the Int */ +static int uPow(int base, int exponent); /* Returns base^exponent an INT */ + +static void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */ +static bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */ +static bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */ + +static fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */ +static fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */ /* Fuse decoding functions * ------------------------------------------------------------------------------------- */ -fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength); -fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength); -fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength); +static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength); +static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength); +static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength); /* Internal Support Functions - Use these ONLY for testing or adding to internal functions * ------------------------------------------------------------------------------------- * Some of the following functions take two INTs as their input - This is unsafe for a variety of reasons. */ -fInt Add (int, int); /* Add two INTs and return Sum as FINT */ -fInt Multiply (int, int); /* Multiply two INTs and return Product as FINT */ -fInt Divide (int, int); /* You get the idea... */ -fInt fNegate(fInt); +static fInt Divide (int, int); /* Divide two INTs and return result as FINT */ +static fInt fNegate(fInt); -int uGetScaledDecimal (fInt); /* Internal function */ -int GetReal (fInt A); /* Internal function */ - -/* Future Additions and Incomplete Functions - * ------------------------------------------------------------------------------------- - */ -int GetRoundedValue(fInt); /* Incomplete function - Useful only when Precision is lacking */ - /* Let us say we have 2.126 but can only handle 2 decimal points. 
We could */ - /* either chop of 6 and keep 2.12 or use this function to get 2.13, which is more accurate */ +static int uGetScaledDecimal (fInt); /* Internal function */ +static int GetReal (fInt A); /* Internal function */ /* ------------------------------------------------------------------------------------- * TROUBLESHOOTING INFORMATION @@ -115,7 +105,7 @@ int GetRoundedValue(fInt); /* Incomplete function - Usef * START OF CODE * ------------------------------------------------------------------------------------- */ -fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/ +static fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/ { uint32_t i; bool bNegated = false; @@ -154,7 +144,7 @@ fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/ return solution; } -fInt fNaturalLog(fInt value) +static fInt fNaturalLog(fInt value) { uint32_t i; fInt upper_bound = Divide(8, 1000); @@ -179,7 +169,7 @@ fInt fNaturalLog(fInt value) return (fAdd(solution, error_term)); } -fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength) +static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength) { fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value); fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); @@ -194,7 +184,7 @@ fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t b } -fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength) +static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength) { fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value); fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); @@ -212,7 +202,7 @@ fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint return f_decoded_value; } -fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength) +static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength) { fInt fLeakage; fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); @@ -225,7 +215,7 @@ fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, return fLeakage; } -fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */ +static fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */ { fInt temp; @@ -237,13 +227,13 @@ fInt ConvertToFraction(int X) /*Add all range checking here. 
Is it possible to m return temp; } -fInt fNegate(fInt X) +static fInt fNegate(fInt X) { fInt CONSTANT_NEGONE = ConvertToFraction(-1); return (fMultiply(X, CONSTANT_NEGONE)); } -fInt Convert_ULONG_ToFraction(uint32_t X) +static fInt Convert_ULONG_ToFraction(uint32_t X) { fInt temp; @@ -255,7 +245,7 @@ fInt Convert_ULONG_ToFraction(uint32_t X) return temp; } -fInt GetScaledFraction(int X, int factor) +static fInt GetScaledFraction(int X, int factor) { int times_shifted, factor_shifted; bool bNEGATED; @@ -304,7 +294,7 @@ fInt GetScaledFraction(int X, int factor) } /* Addition using two fInts */ -fInt fAdd (fInt X, fInt Y) +static fInt fAdd (fInt X, fInt Y) { fInt Sum; @@ -314,7 +304,7 @@ fInt fAdd (fInt X, fInt Y) } /* Addition using two fInts */ -fInt fSubtract (fInt X, fInt Y) +static fInt fSubtract (fInt X, fInt Y) { fInt Difference; @@ -323,7 +313,7 @@ fInt fSubtract (fInt X, fInt Y) return Difference; } -bool Equal(fInt A, fInt B) +static bool Equal(fInt A, fInt B) { if (A.full == B.full) return true; @@ -331,7 +321,7 @@ bool Equal(fInt A, fInt B) return false; } -bool GreaterThan(fInt A, fInt B) +static bool GreaterThan(fInt A, fInt B) { if (A.full > B.full) return true; @@ -339,7 +329,7 @@ bool GreaterThan(fInt A, fInt B) return false; } -fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */ +static fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */ { fInt Product; int64_t tempProduct; @@ -363,7 +353,7 @@ fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */ return Product; } -fInt fDivide (fInt X, fInt Y) +static fInt fDivide (fInt X, fInt Y) { fInt fZERO, fQuotient; int64_t longlongX, longlongY; @@ -384,7 +374,7 @@ fInt fDivide (fInt X, fInt Y) return fQuotient; } -int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/ +static int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/ { fInt fullNumber, scaledDecimal, scaledReal; @@ -397,13 +387,13 @@ int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to ch return fullNumber.full; } -fInt fGetSquare(fInt A) +static fInt fGetSquare(fInt A) { return fMultiply(A,A); } /* x_new = x_old - (x_old^2 - C) / (2 * x_old) */ -fInt fSqrt(fInt num) +static fInt fSqrt(fInt num) { fInt F_divide_Fprime, Fprime; fInt test; @@ -460,7 +450,7 @@ fInt fSqrt(fInt num) return (x_new); } -void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[]) +static void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[]) { fInt *pRoots = &Roots[0]; fInt temp, root_first, root_second; @@ -498,52 +488,13 @@ void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[]) * ----------------------------------------------------------------------------- */ -/* Addition using two normal ints - Temporary - Use only for testing purposes?. 
*/ -fInt Add (int X, int Y) -{ - fInt A, B, Sum; - - A.full = (X << SHIFT_AMOUNT); - B.full = (Y << SHIFT_AMOUNT); - - Sum.full = A.full + B.full; - - return Sum; -} - /* Conversion Functions */ -int GetReal (fInt A) +static int GetReal (fInt A) { return (A.full >> SHIFT_AMOUNT); } -/* Temporarily Disabled */ -int GetRoundedValue(fInt A) /*For now, round the 3rd decimal place */ -{ - /* ROUNDING TEMPORARLY DISABLED - int temp = A.full; - int decimal_cutoff, decimal_mask = 0x000001FF; - decimal_cutoff = temp & decimal_mask; - if (decimal_cutoff > 0x147) { - temp += 673; - }*/ - - return ConvertBackToInteger(A)/10000; /*Temporary - in case this was used somewhere else */ -} - -fInt Multiply (int X, int Y) -{ - fInt A, B, Product; - - A.full = X << SHIFT_AMOUNT; - B.full = Y << SHIFT_AMOUNT; - - Product = fMultiply(A, B); - - return Product; -} - -fInt Divide (int X, int Y) +static fInt Divide (int X, int Y) { fInt A, B, Quotient; @@ -555,7 +506,7 @@ fInt Divide (int X, int Y) return Quotient; } -int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */ +static int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */ { int dec[PRECISION]; int i, scaledDecimal = 0, tmp = A.partial.decimal; @@ -570,7 +521,7 @@ int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole intege return scaledDecimal; } -int uPow(int base, int power) +static int uPow(int base, int power) { if (power == 0) return 1; @@ -578,15 +529,7 @@ int uPow(int base, int power) return (base)*uPow(base, power - 1); } -fInt fAbs(fInt A) -{ - if (A.partial.real < 0) - return (fMultiply(A, ConvertToFraction(-1))); - else - return A; -} - -int uAbs(int X) +static int uAbs(int X) { if (X < 0) return (X * -1); @@ -594,7 +537,7 @@ int uAbs(int X) return X; } -fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term) +static fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term) { fInt solution; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index 2f1a14f..6c321b0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -794,19 +794,35 @@ static const ATOM_PPLIB_STATE_V2 *get_state_entry_v2( static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table( struct pp_hwmgr *hwmgr) { - const void *table_addr = NULL; + const void *table_addr = hwmgr->soft_pp_table; uint8_t frev, crev; uint16_t size; - table_addr = cgs_atom_get_data_table(hwmgr->device, - GetIndexIntoMasterTable(DATA, PowerPlayInfo), - &size, &frev, &crev); + if (!table_addr) { + table_addr = cgs_atom_get_data_table(hwmgr->device, + GetIndexIntoMasterTable(DATA, PowerPlayInfo), + &size, &frev, &crev); - hwmgr->soft_pp_table = table_addr; + hwmgr->soft_pp_table = table_addr; + hwmgr->soft_pp_table_size = size; + } return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr; } +int pp_tables_get_response_times(struct pp_hwmgr *hwmgr, + uint32_t *vol_rep_time, uint32_t *bb_rep_time) +{ + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_tab = get_powerplay_table(hwmgr); + + PP_ASSERT_WITH_CODE(NULL != powerplay_tab, + "Missing PowerPlay Table!", return -EINVAL); + + *vol_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usVoltageTime); + *bb_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usBackbiasTime); + + return 0; +} int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr, unsigned long *num_of_entries) @@ -1499,7 +1515,7 @@ 
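/*
 * Editor's note (illustrative sketch, not part of the patch): the
 * get_powerplay_table() change above turns the lookup into a lazy cache.
 * The first call fetches the PowerPlay table from the VBIOS through
 * cgs_atom_get_data_table() and stores pointer and size in hwmgr, so later
 * callers, such as the new pp_tables_get_response_times(), reuse the cached
 * copy instead of walking the master data table again.  The names below
 * (struct example_ctx, lookup_vbios_table()) are hypothetical stand-ins.
 */
#include <linux/types.h>

const void *lookup_vbios_table(u16 *size);	/* hypothetical VBIOS query */

struct example_ctx {
	const void *soft_pp_table;
	u16 soft_pp_table_size;
};

static const void *example_get_table(struct example_ctx *ctx)
{
	if (!ctx->soft_pp_table) {
		u16 size = 0;

		ctx->soft_pp_table = lookup_vbios_table(&size);
		if (ctx->soft_pp_table)
			ctx->soft_pp_table_size = size;
	}
	return ctx->soft_pp_table;	/* NULL only if the VBIOS query failed */
}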
int get_number_of_vce_state_table_entries( const ATOM_PPLIB_VCE_State_Table *vce_table = get_vce_state_table(hwmgr, table); - if (vce_table > 0) + if (vce_table) return vce_table->numEntries; return 0; @@ -1589,11 +1605,6 @@ static int pp_tables_initialize(struct pp_hwmgr *hwmgr) static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->soft_pp_table) { - kfree(hwmgr->soft_pp_table); - hwmgr->soft_pp_table = NULL; - } - if (NULL != hwmgr->dyn_state.vddc_dependency_on_sclk) { kfree(hwmgr->dyn_state.vddc_dependency_on_sclk); hwmgr->dyn_state.vddc_dependency_on_sclk = NULL; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h index 3043480..baddaa7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h @@ -32,16 +32,19 @@ struct pp_hw_power_state; extern const struct pp_table_func pptable_funcs; typedef int (*pp_tables_hw_clock_info_callback)(struct pp_hwmgr *hwmgr, - struct pp_hw_power_state *hw_ps, - unsigned int index, - const void *clock_info); + struct pp_hw_power_state *hw_ps, + unsigned int index, + const void *clock_info); int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr, - unsigned long *num_of_entries); + unsigned long *num_of_entries); int pp_tables_get_entry(struct pp_hwmgr *hwmgr, - unsigned long entry_index, - struct pp_power_state *ps, - pp_tables_hw_clock_info_callback func); + unsigned long entry_index, + struct pp_power_state *ps, + pp_tables_hw_clock_info_callback func); + +int pp_tables_get_response_times(struct pp_hwmgr *hwmgr, + uint32_t *vol_rep_time, uint32_t *bb_rep_time); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c index 5d0f655..c7dc111 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c @@ -571,7 +571,7 @@ int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if (0 == data->sclk_dpm_key_disabled) { /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ PP_ASSERT_WITH_CODE( - (0 == tonga_is_dpm_running(hwmgr)), + !tonga_is_dpm_running(hwmgr), "Trying to Disable SCLK DPM when DPM is disabled", return -1 ); @@ -587,7 +587,7 @@ int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if (0 == data->mclk_dpm_key_disabled) { /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */ PP_ASSERT_WITH_CODE( - (0 == tonga_is_dpm_running(hwmgr)), + !tonga_is_dpm_running(hwmgr), "Trying to Disable MCLK DPM when DPM is disabled", return -1 ); @@ -614,7 +614,7 @@ int tonga_stop_dpm(struct pp_hwmgr *hwmgr) if (0 == data->pcie_dpm_key_disabled) { /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ PP_ASSERT_WITH_CODE( - (0 == tonga_is_dpm_running(hwmgr)), + !tonga_is_dpm_running(hwmgr), "Trying to Disable PCIE DPM when DPM is disabled", return -1 ); @@ -630,7 +630,7 @@ int tonga_stop_dpm(struct pp_hwmgr *hwmgr) /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ PP_ASSERT_WITH_CODE( - (0 == tonga_is_dpm_running(hwmgr)), + !tonga_is_dpm_running(hwmgr), "Trying to Disable Voltage CNTL when DPM is disabled", return -1 ); @@ -688,8 +688,9 @@ int tonga_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n) uint32_t level_mask = 1 << n; /* Checking if DPM is running. 
If we discover hang because of this, we should skip this message. */ - PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), - "Trying to force SCLK when DPM is disabled", return -1;); + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to force SCLK when DPM is disabled", + return -1;); if (0 == data->sclk_dpm_key_disabled) return (0 == smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, @@ -712,8 +713,9 @@ int tonga_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n) uint32_t level_mask = 1 << n; /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */ - PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), - "Trying to Force MCLK when DPM is disabled", return -1;); + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to Force MCLK when DPM is disabled", + return -1;); if (0 == data->mclk_dpm_key_disabled) return (0 == smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, @@ -735,8 +737,9 @@ int tonga_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n) tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ - PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), - "Trying to Force PCIE level when DPM is disabled", return -1;); + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to Force PCIE level when DPM is disabled", + return -1;); if (0 == data->pcie_dpm_key_disabled) return (0 == smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, @@ -774,7 +777,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) uint32_t tmp; int result; - bool error = 0; + bool error = false; result = tonga_read_smc_sram_dword(hwmgr->smumgr, SMU72_FIRMWARE_HEADER_LOCATION + @@ -933,11 +936,11 @@ int tonga_init_power_gate_state(struct pp_hwmgr *hwmgr) { tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - data->uvd_power_gated = 0; - data->vce_power_gated = 0; - data->samu_power_gated = 0; - data->acp_power_gated = 0; - data->pg_acp_init = 1; + data->uvd_power_gated = false; + data->vce_power_gated = false; + data->samu_power_gated = false; + data->acp_power_gated = false; + data->pg_acp_init = true; return 0; } @@ -955,7 +958,7 @@ int tonga_check_for_dpm_running(struct pp_hwmgr *hwmgr) * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM, * whereas voltage control is a fundemental change that will not be disabled */ - return (0 == tonga_is_dpm_running(hwmgr) ? 0 : 1); + return (!tonga_is_dpm_running(hwmgr) ? 
0 : 1); } /** @@ -968,7 +971,7 @@ int tonga_check_for_dpm_stopped(struct pp_hwmgr *hwmgr) { tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - if (0 != tonga_is_dpm_running(hwmgr)) { + if (tonga_is_dpm_running(hwmgr)) { /* If HW Virtualization is enabled, dpm_table_start will not have a valid value */ if (!data->dpm_table_start) { return 1; @@ -991,7 +994,7 @@ static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr, { uint32_t table_size, i, j; uint16_t vvalue; - bool bVoltageFound = 0; + bool bVoltageFound = false; pp_atomctrl_voltage_table *table; PP_ASSERT_WITH_CODE((NULL != voltage_table), "Voltage Table empty.", return -1;); @@ -1007,11 +1010,11 @@ static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr, for (i = 0; i < voltage_table->count; i++) { vvalue = voltage_table->entries[i].value; - bVoltageFound = 0; + bVoltageFound = false; for (j = 0; j < table->count; j++) { if (vvalue == table->entries[j].value) { - bVoltageFound = 1; + bVoltageFound = true; break; } } @@ -1331,7 +1334,6 @@ static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr, { uint32_t count; uint8_t index; - int result = 0; tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = pptable_info->vddgfx_lookup_table; @@ -1378,7 +1380,7 @@ static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr, } } - return result; + return 0; } @@ -2042,7 +2044,7 @@ static int tonga_populate_single_memory_level( if ((data->mclk_stutter_mode_threshold != 0) && (memory_clock <= data->mclk_stutter_mode_threshold) && - (data->is_uvd_enabled == 0) + (!data->is_uvd_enabled) && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1) && (data->display_timing.num_existing_displays <= 2) && (data->display_timing.num_existing_displays != 0)) @@ -2705,7 +2707,7 @@ static int tonga_reset_single_dpm_table( dpm_table->count = count; for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) { - dpm_table->dpm_levels[i].enabled = 0; + dpm_table->dpm_levels[i].enabled = false; } return 0; @@ -2718,7 +2720,7 @@ static void tonga_setup_pcie_table_entry( { dpm_table->dpm_levels[index].value = pcie_gen; dpm_table->dpm_levels[index].param1 = pcie_lanes; - dpm_table->dpm_levels[index].enabled = 1; + dpm_table->dpm_levels[index].enabled = true; } static int tonga_setup_default_pcie_tables(struct pp_hwmgr *hwmgr) @@ -2828,7 +2830,7 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) allowed_vdd_sclk_table->entries[i].clk) { data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = allowed_vdd_sclk_table->entries[i].clk; - data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */ + data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = true; /*(i==0) ? 1 : 0; to do */ data->dpm_table.sclk_table.count++; } } @@ -2842,7 +2844,7 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) allowed_vdd_mclk_table->entries[i].clk) { data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = allowed_vdd_mclk_table->entries[i].clk; - data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */ + data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = true; /*(i==0) ? 
1 : 0; */ data->dpm_table.mclk_table.count++; } } @@ -3026,8 +3028,8 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) reg_value = 0; if ((0 == reg_value) && - (0 == atomctrl_get_pp_assign_pin(hwmgr, - VDDC_VRHOT_GPIO_PINID, &gpio_pin_assignment))) { + (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, + &gpio_pin_assignment))) { table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot); @@ -3040,8 +3042,8 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) /* ACDC Switch GPIO */ reg_value = 0; if ((0 == reg_value) && - (0 == atomctrl_get_pp_assign_pin(hwmgr, - PP_AC_DC_SWITCH_GPIO_PINID, &gpio_pin_assignment))) { + (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID, + &gpio_pin_assignment))) { table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_AutomaticDCTransition); @@ -3063,8 +3065,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) } reg_value = 0; - if ((0 == reg_value) && - (0 == atomctrl_get_pp_assign_pin(hwmgr, + if ((0 == reg_value) && (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) { phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalOutGPIO); @@ -3135,7 +3136,7 @@ int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) if (0 == data->sclk_dpm_key_disabled) { /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ - if (0 != tonga_is_dpm_running(hwmgr)) + if (tonga_is_dpm_running(hwmgr)) printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n"); if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) { @@ -3150,7 +3151,7 @@ int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) if (0 == data->mclk_dpm_key_disabled) { /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ - if (0 != tonga_is_dpm_running(hwmgr)) + if (tonga_is_dpm_running(hwmgr)) printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n"); if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) { @@ -3261,7 +3262,7 @@ int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwm /* initialize vddc_dep_on_dal_pwrl table */ table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record); - table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL); + table_clk_vlt = kzalloc(table_size, GFP_KERNEL); if (NULL == table_clk_vlt) { printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n"); @@ -3336,9 +3337,9 @@ int tonga_unforce_dpm_levels(struct pp_hwmgr *hwmgr) tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); int result = 1; - PP_ASSERT_WITH_CODE (0 == tonga_is_dpm_running(hwmgr), - "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.", - return result); + PP_ASSERT_WITH_CODE (!tonga_is_dpm_running(hwmgr), + "Trying to Unforce DPM when DPM is disabled. 
Returning without sending SMC message.", + return result); if (0 == data->pcie_dpm_key_disabled) { PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc( @@ -3742,7 +3743,7 @@ uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr) bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg) { - bool result = 1; + bool result = true; switch (inReg) { case mmMC_SEQ_RAS_TIMING: @@ -3826,7 +3827,7 @@ bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg) break; default: - result = 0; + result = false; break; } @@ -4422,13 +4423,6 @@ int tonga_reset_asic_tasks(struct pp_hwmgr *hwmgr) int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) { - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (data->soft_pp_table) { - kfree(data->soft_pp_table); - data->soft_pp_table = NULL; - } - return phm_hwmgr_backend_fini(hwmgr); } @@ -4442,7 +4436,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { int result = 0; SMU72_Discrete_DpmTable *table = NULL; - tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + tonga_hwmgr *data; pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); phw_tonga_ulv_parm *ulv; @@ -4451,7 +4445,13 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((NULL != hwmgr), "Invalid Parameter!", return -1;); - data->dll_defaule_on = 0; + data = kzalloc(sizeof(struct tonga_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + hwmgr->backend = data; + + data->dll_defaule_on = false; data->sram_end = SMC_RAM_END; data->activity_target[0] = PPTONGA_TARGETACTIVITY_DFLT; @@ -4557,13 +4557,13 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) /* ULV Support*/ ulv = &(data->ulv); - ulv->ulv_supported = 0; + ulv->ulv_supported = false; /* Initalize Dynamic State Adjustment Rule Settings*/ result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr); if (result) printk(KERN_ERR "[ powerplay ] tonga_initializa_dynamic_state_adjustment_rule_settings failed!\n"); - data->uvd_enabled = 0; + data->uvd_enabled = false; table = &(data->smc_state_table); @@ -4571,7 +4571,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) * if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, * Peak Current Control feature is enabled and we should program PCC HW register */ - if (0 == atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) { + if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) { uint32_t temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL); @@ -4610,7 +4610,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SMU7); - data->vddc_phase_shed_control = 0; + data->vddc_phase_shed_control = false; phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating); @@ -4629,7 +4629,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) } if (0 == result) { - data->is_tlu_enabled = 0; + data->is_tlu_enabled = false; hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = TONGA_MAX_HARDWARE_POWERLEVELS; hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; @@ -4639,7 +4639,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; result = cgs_query_system_info(hwmgr->device, &sys_info); if (result) - data->pcie_gen_cap = 0x30007; + data->pcie_gen_cap = 
AMDGPU_DEFAULT_PCIE_GEN_MASK; else data->pcie_gen_cap = (uint32_t)sys_info.value; if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) @@ -4648,7 +4648,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; result = cgs_query_system_info(hwmgr->device, &sys_info); if (result) - data->pcie_lane_cap = 0x2f0000; + data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; else data->pcie_lane_cap = (uint32_t)sys_info.value; } else { @@ -5310,9 +5310,8 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->sclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE( - 0 == tonga_is_dpm_running(hwmgr), - "Trying to freeze SCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to freeze SCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE( 0 == smum_send_msg_to_smc(hwmgr->smumgr, @@ -5324,8 +5323,8 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->mclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), - "Trying to freeze MCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to freeze MCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE( 0 == smum_send_msg_to_smc(hwmgr->smumgr, @@ -5460,7 +5459,6 @@ static int tonga_trim_single_dpm_states(struct pp_hwmgr *hwmgr, static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_state) { - int result = 0; struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); uint32_t high_limit_count; @@ -5480,7 +5478,7 @@ static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_powe hw_state->performance_levels[0].memory_clock, hw_state->performance_levels[high_limit_count].memory_clock); - return result; + return 0; } static int tonga_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input) @@ -5627,8 +5625,8 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), - "Trying to Unfreeze SCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to Unfreeze SCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE( 0 == smum_send_msg_to_smc(hwmgr->smumgr, @@ -5640,9 +5638,8 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->mclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE( - 0 == tonga_is_dpm_running(hwmgr), - "Trying to Unfreeze MCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to Unfreeze MCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE( 0 == smum_send_msg_to_smc(hwmgr->smumgr, @@ -6031,42 +6028,6 @@ static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr) CG_FDO_CTRL2, FDO_PWM_MODE); } -static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (!data->soft_pp_table) { - data->soft_pp_table = kmemdup(hwmgr->soft_pp_table, - hwmgr->soft_pp_table_size, - GFP_KERNEL); - if (!data->soft_pp_table) - return -ENOMEM; - } - - *table = (char *)&data->soft_pp_table; - - return hwmgr->soft_pp_table_size; -} - -static int 
tonga_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (!data->soft_pp_table) { - data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); - if (!data->soft_pp_table) - return -ENOMEM; - } - - memcpy(data->soft_pp_table, buf, size); - - hwmgr->soft_pp_table = data->soft_pp_table; - - /* TODO: re-init powerplay to implement modified pptable */ - - return 0; -} - static int tonga_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask) { @@ -6174,11 +6135,96 @@ static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr, return size; } +static int tonga_get_sclk_od(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct tonga_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + int value; + + value = (sclk_table->dpm_levels[sclk_table->count - 1].value - + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * + 100 / + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return value; +} + +static int tonga_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct tonga_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + struct pp_power_state *ps; + struct tonga_power_state *tonga_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + tonga_ps = cast_phw_tonga_power_state(&ps->hardware); + + tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].engine_clock = + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * + value / 100 + + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return 0; +} + +static int tonga_get_mclk_od(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct tonga_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + int value; + + value = (mclk_table->dpm_levels[mclk_table->count - 1].value - + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * + 100 / + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return value; +} + +static int tonga_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct tonga_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + struct pp_power_state *ps; + struct tonga_power_state *tonga_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + tonga_ps = cast_phw_tonga_power_state(&ps->hardware); + + tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock = + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * + value / 100 + + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return 0; +} + static const struct pp_hwmgr_func tonga_hwmgr_funcs = { .backend_init = &tonga_hwmgr_backend_init, .backend_fini = &tonga_hwmgr_backend_fini, .asic_setup = &tonga_setup_asic_task, .dynamic_state_management_enable = &tonga_enable_dpm_tasks, + .dynamic_state_management_disable = &tonga_disable_dpm_tasks, .apply_state_adjust_rules = 
tonga_apply_state_adjust_rules, .force_dpm_level = &tonga_force_dpm_level, .power_state_set = tonga_set_power_state_tasks, @@ -6212,22 +6258,16 @@ static const struct pp_hwmgr_func tonga_hwmgr_funcs = { .check_states_equal = tonga_check_states_equal, .set_fan_control_mode = tonga_set_fan_control_mode, .get_fan_control_mode = tonga_get_fan_control_mode, - .get_pp_table = tonga_get_pp_table, - .set_pp_table = tonga_set_pp_table, .force_clock_level = tonga_force_clock_level, .print_clock_levels = tonga_print_clock_levels, + .get_sclk_od = tonga_get_sclk_od, + .set_sclk_od = tonga_set_sclk_od, + .get_mclk_od = tonga_get_mclk_od, + .set_mclk_od = tonga_set_mclk_od, }; int tonga_hwmgr_init(struct pp_hwmgr *hwmgr) { - tonga_hwmgr *data; - - data = kzalloc (sizeof(tonga_hwmgr), GFP_KERNEL); - if (data == NULL) - return -ENOMEM; - memset(data, 0x00, sizeof(tonga_hwmgr)); - - hwmgr->backend = data; hwmgr->hwmgr_func = &tonga_hwmgr_funcs; hwmgr->pptable_func = &tonga_pptable_funcs; pp_tonga_thermal_initialize(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h index 573cd39..3961884 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h @@ -352,9 +352,6 @@ struct tonga_hwmgr { bool samu_power_gated; /* 1: gated, 0:not gated */ bool acp_power_gated; /* 1: gated, 0:not gated */ bool pg_acp_init; - - /* soft pptable for re-uploading into smu */ - void *soft_pp_table; }; typedef struct tonga_hwmgr tonga_hwmgr; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c index dccc859..cfb647f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c @@ -167,8 +167,7 @@ static int get_vddc_lookup_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels; - table = (phm_ppt_v1_voltage_lookup_table *) - kzalloc(table_size, GFP_KERNEL); + table = kzalloc(table_size, GFP_KERNEL); if (NULL == table) return -ENOMEM; @@ -327,7 +326,7 @@ static int get_valid_clk( table_size = sizeof(uint32_t) + sizeof(uint32_t) * clk_volt_pp_table->count; - table = (struct phm_clock_array *)kzalloc(table_size, GFP_KERNEL); + table = kzalloc(table_size, GFP_KERNEL); if (NULL == table) return -ENOMEM; @@ -377,8 +376,7 @@ static int get_mclk_voltage_dependency_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) * mclk_dep_table->ucNumEntries; - mclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) - kzalloc(table_size, GFP_KERNEL); + mclk_table = kzalloc(table_size, GFP_KERNEL); if (NULL == mclk_table) return -ENOMEM; @@ -424,8 +422,7 @@ static int get_sclk_voltage_dependency_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) * tonga_table->ucNumEntries; - sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) - kzalloc(table_size, GFP_KERNEL); + sclk_table = kzalloc(table_size, GFP_KERNEL); if (NULL == sclk_table) return -ENOMEM; @@ -456,8 +453,7 @@ static int get_sclk_voltage_dependency_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) * polaris_table->ucNumEntries; - sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) - kzalloc(table_size, GFP_KERNEL); + sclk_table = kzalloc(table_size, GFP_KERNEL); if (NULL == sclk_table) return -ENOMEM; @@ -504,7 +500,7 @@ static int 
get_pcie_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries; - pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL); + pcie_table = kzalloc(table_size, GFP_KERNEL); if (pcie_table == NULL) return -ENOMEM; @@ -541,7 +537,7 @@ static int get_pcie_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries; - pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL); + pcie_table = kzalloc(table_size, GFP_KERNEL); if (pcie_table == NULL) return -ENOMEM; @@ -695,8 +691,7 @@ static int get_mm_clock_voltage_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record) * mm_dependency_table->ucNumEntries; - mm_table = (phm_ppt_v1_mm_clock_voltage_dependency_table *) - kzalloc(table_size, GFP_KERNEL); + mm_table = kzalloc(table_size, GFP_KERNEL); if (NULL == mm_table) return -ENOMEM; @@ -1073,13 +1068,9 @@ int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr) int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) { - int result = 0; struct phm_ppt_v1_information *pp_table_information = (struct phm_ppt_v1_information *)(hwmgr->pptable); - if (NULL != hwmgr->soft_pp_table) - hwmgr->soft_pp_table = NULL; - kfree(pp_table_information->vdd_dep_on_sclk); pp_table_information->vdd_dep_on_sclk = NULL; @@ -1116,7 +1107,7 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) kfree(hwmgr->pptable); hwmgr->pptable = NULL; - return result; + return 0; } const struct pp_table_func tonga_pptable_funcs = { diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index 50b367d..b764c8c 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h @@ -132,6 +132,7 @@ struct amd_pp_init { uint32_t chip_family; uint32_t chip_id; uint32_t rev_id; + bool powercontainment_enabled; }; enum amd_pp_display_config_type{ AMD_PP_DisplayConfigType_None = 0, @@ -342,6 +343,10 @@ struct amd_powerplay_funcs { int (*set_pp_table)(void *handle, const char *buf, size_t size); int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask); int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf); + int (*get_sclk_od)(void *handle); + int (*set_sclk_od)(void *handle, uint32_t value); + int (*get_mclk_od)(void *handle); + int (*set_mclk_od)(void *handle, uint32_t value); }; struct amd_powerplay { @@ -355,6 +360,8 @@ int amd_powerplay_init(struct amd_pp_init *pp_init, int amd_powerplay_fini(void *handle); +int amd_powerplay_reset(void *handle); + int amd_powerplay_display_configuration_change(void *handle, const struct amd_pp_display_configuration *input); diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index 56f712c..962cb53 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -340,6 +340,7 @@ extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate); extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr); extern int phm_setup_asic(struct pp_hwmgr *hwmgr); extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr); +extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr); extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr); extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr); extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block); 
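/*
 * Editor's note (illustrative sketch, not part of the patch): the
 * get_sclk_od/set_sclk_od and get_mclk_od/set_mclk_od hooks added to
 * amd_powerplay.h above (and to pp_hwmgr_func below) expose the overdrive
 * value that tonga_get_sclk_od() and friends compute earlier in this patch:
 * the percentage by which the highest DPM level exceeds the highest level of
 * the "golden" (default) table, with write requests capped at 20%.  Reduced
 * to plain arithmetic, with hypothetical helper names:
 */
static int example_get_od_percent(int current_top_clk, int golden_top_clk)
{
	/* e.g. current 110000, golden 100000 -> 10 (per cent above stock) */
	return (current_top_clk - golden_top_clk) * 100 / golden_top_clk;
}

static int example_apply_od_percent(int golden_top_clk, int od_percent)
{
	if (od_percent > 20)		/* requests are clamped to +20% */
		od_percent = 20;
	return golden_top_clk + golden_top_clk * od_percent / 100;
}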
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 77e8e33..bf0d2ac 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -278,6 +278,8 @@ struct pp_hwmgr_func { int (*dynamic_state_management_enable)( struct pp_hwmgr *hw_mgr); + int (*dynamic_state_management_disable)( + struct pp_hwmgr *hw_mgr); int (*patch_boot_state)(struct pp_hwmgr *hwmgr, struct pp_hw_power_state *hw_ps); @@ -333,11 +335,13 @@ struct pp_hwmgr_func { int (*get_clock_by_type)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks); int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); int (*power_off_asic)(struct pp_hwmgr *hwmgr); - int (*get_pp_table)(struct pp_hwmgr *hwmgr, char **table); - int (*set_pp_table)(struct pp_hwmgr *hwmgr, const char *buf, size_t size); int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask); int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf); int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable); + int (*get_sclk_od)(struct pp_hwmgr *hwmgr); + int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); + int (*get_mclk_od)(struct pp_hwmgr *hwmgr); + int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); }; struct pp_table_func { @@ -580,6 +584,7 @@ struct pp_hwmgr { struct pp_smumgr *smumgr; const void *soft_pp_table; uint32_t soft_pp_table_size; + void *hardcode_pp_table; bool need_pp_table_upload; enum amd_dpm_forced_level dpm_level; bool block_hw_access; @@ -609,6 +614,7 @@ struct pp_hwmgr { uint32_t num_ps; struct pp_thermal_controller_info thermal_controller; bool fan_ctrl_is_in_default_mode; + bool powercontainment_enabled; uint32_t fan_ctrl_default_mode; uint32_t tmin; struct phm_microcode_version_info microcode_version_info; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index fc9e3d1..3c235f0 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -131,6 +131,12 @@ extern int smu_free_memory(void *device, void *handle); smum_wait_on_indirect_register(smumgr, \ mm##port##_INDEX, index, value, mask) +#define SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, value, mask) \ + SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask) + +#define SMUM_WAIT_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \ + SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ + SMUM_FIELD_MASK(reg, field) ) #define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \ index, value, mask) \ @@ -158,6 +164,10 @@ extern int smu_free_memory(void *device, void *handle); (SMUM_FIELD_MASK(reg, field) & ((field_val) << \ SMUM_FIELD_SHIFT(reg, field)))) +#define SMUM_READ_INDIRECT_FIELD(device, port, reg, field) \ + SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ + reg, field) + #define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \ port, index, value, mask) \ smum_wait_on_indirect_register(smumgr, \ @@ -191,6 +201,13 @@ extern int smu_free_memory(void *device, void *handle); SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ reg, field, fieldval)) + +#define SMUM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \ + cgs_write_ind_register(device, port, ix##reg, \ + SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ + reg, field, fieldval)) + + 
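/*
 * Editor's note (illustrative sketch, not part of the patch): the new
 * SMUM_READ_INDIRECT_FIELD()/SMUM_WRITE_INDIRECT_FIELD() and
 * SMUM_WAIT_INDIRECT_* macros above only compose existing pieces: the
 * ix##reg indirect index plus the generated <REG>__<FIELD>_MASK and
 * __SHIFT constants, applied around cgs_read_ind_register() and
 * cgs_write_ind_register().  Expanded by hand for a made-up register, the
 * write path is the usual read-modify-write; EXAMPLE_* and the
 * read_reg()/write_reg() helpers below are hypothetical stand-ins.
 */
#include <linux/types.h>

#define EXAMPLE_REG__FIELD_MASK		0x00000f00u
#define EXAMPLE_REG__FIELD__SHIFT	8

u32 read_reg(u32 index);		/* hypothetical indirect read */
void write_reg(u32 index, u32 val);	/* hypothetical indirect write */

static void example_write_field(u32 index, u32 fieldval)
{
	u32 reg = read_reg(index);

	reg &= ~EXAMPLE_REG__FIELD_MASK;			/* clear the old field */
	reg |= (fieldval << EXAMPLE_REG__FIELD__SHIFT) &	/* place the new value */
	       EXAMPLE_REG__FIELD_MASK;				/* keep it inside the field */
	write_reg(index, reg);
}

static u32 example_read_field(u32 index)
{
	return (read_reg(index) & EXAMPLE_REG__FIELD_MASK) >>
	       EXAMPLE_REG__FIELD__SHIFT;
}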
#define SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \ SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, \ (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ @@ -200,4 +217,16 @@ extern int smu_free_memory(void *device, void *handle); SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, \ (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ SMUM_FIELD_MASK(reg, field)) + +#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, index, value, mask) \ + smum_wait_for_indirect_register_unequal(smumgr, \ + mm##port##_INDEX, index, value, mask) + +#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \ + SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask) + +#define SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \ + SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ + SMUM_FIELD_MASK(reg, field) ) + #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 0728c1e3..7723473 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -23,6 +23,7 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> +#include <drm/amdgpu_drm.h> #include "pp_instance.h" #include "smumgr.h" #include "cgs_common.h" @@ -52,10 +53,10 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle) handle->smu_mgr = smumgr; switch (smumgr->chip_family) { - case AMD_FAMILY_CZ: + case AMDGPU_FAMILY_CZ: cz_smum_init(smumgr); break; - case AMD_FAMILY_VI: + case AMDGPU_FAMILY_VI: switch (smumgr->chip_id) { case CHIP_TONGA: tonga_smum_init(smumgr); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index b22722e..f42c536 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -479,7 +479,6 @@ static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr) struct tonga_smumgr *tonga_smu = (struct tonga_smumgr *)(smumgr->backend); uint16_t fw_to_load; - int result = 0; struct SMU_DRAMData_TOC *toc; /** * First time this gets called during SmuMgr init, @@ -563,7 +562,7 @@ static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr) smumgr, PPSMC_MSG_LoadUcodes, fw_to_load), "Fail to Request SMU Load uCode", return 0); - return result; + return 0; } static int tonga_request_smu_load_specific_fw(struct pp_smumgr *smumgr, diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h index c89dc77..b961a1c 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h @@ -26,7 +26,7 @@ TRACE_EVENT(amd_sched_job, TP_fast_assign( __entry->entity = sched_job->s_entity; __entry->sched_job = sched_job; - __entry->fence = &sched_job->s_fence->base; + __entry->fence = &sched_job->s_fence->finished; __entry->name = sched_job->sched->name; __entry->job_count = kfifo_len( &sched_job->s_entity->job_queue) / sizeof(sched_job); @@ -46,7 +46,7 @@ TRACE_EVENT(amd_sched_process_job, ), TP_fast_assign( - __entry->fence = &fence->base; + __entry->fence = &fence->finished; ), TP_printk("fence=%p signaled", __entry->fence) ); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index c16248c..ef312bb 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c 
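/*
 * Editor's note (illustrative sketch, not part of the patch): the
 * gpu_scheduler.c hunks that follow give each job two fences instead of one,
 * "scheduled" signalling when the job is handed to the hardware ring and
 * "finished" signalling when the hardware fence completes.  That is why the
 * trace events just above now reference s_fence->finished and why each
 * entity allocates two fence contexts.  A dependency on a job from the same
 * scheduler only has to wait for "scheduled", since the ring preserves
 * ordering after submission; any other dependency waits for "finished".
 * struct example_sched_fence and the helper below are hypothetical
 * stand-ins for that decision.
 */
#include <linux/types.h>
#include <linux/fence.h>

struct example_sched_fence {
	struct fence scheduled;		/* signalled by the scheduler thread */
	struct fence finished;		/* signalled from the HW fence callback */
};

static struct fence *
example_pick_dependency(struct example_sched_fence *dep, bool same_scheduler)
{
	return same_scheduler ? &dep->scheduled : &dep->finished;
}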
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -32,6 +32,7 @@
 static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
+static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);

 struct kmem_cache *sched_fence_slab;
 atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
@@ -140,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                 return r;

         atomic_set(&entity->fence_seq, 0);
-        entity->fence_context = fence_context_alloc(1);
+        entity->fence_context = fence_context_alloc(2);

         return 0;
 }
@@ -251,17 +252,21 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)

         s_fence = to_amd_sched_fence(fence);
         if (s_fence && s_fence->sched == sched) {
-                /* Fence is from the same scheduler */
-                if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
-                        /* Ignore it when it is already scheduled */
-                        fence_put(entity->dependency);
-                        return false;
-                }
-                /* Wait for fence to be scheduled */
-                entity->cb.func = amd_sched_entity_clear_dep;
-                list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
-                return true;
+                /*
+                 * Fence is from the same scheduler, only need to wait for
+                 * it to be scheduled
+                 */
+                fence = fence_get(&s_fence->scheduled);
+                fence_put(entity->dependency);
+                entity->dependency = fence;
+                if (!fence_add_callback(fence, &entity->cb,
+                                        amd_sched_entity_clear_dep))
+                        return true;
+
+                /* Ignore it when it is already scheduled */
+                fence_put(fence);
+                return false;
         }

         if (!fence_add_callback(entity->dependency, &entity->cb,
@@ -319,46 +324,114 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
         return added;
 }

-static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) {
-        struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job);
-        schedule_work(&job->work_free_job);
-}
-
 /* job_finish is called after hw fence signaled, and
  * the job had already been deleted from ring_mirror_list
  */
-void amd_sched_job_finish(struct amd_sched_job *s_job)
+static void amd_sched_job_finish(struct work_struct *work)
 {
-        struct amd_sched_job *next;
+        struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
+                                                   finish_work);
         struct amd_gpu_scheduler *sched = s_job->sched;

+        /* remove job from ring_mirror_list */
+        spin_lock(&sched->job_list_lock);
+        list_del_init(&s_job->node);
         if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
-                if (cancel_delayed_work(&s_job->work_tdr))
-                        amd_sched_job_put(s_job);
+                struct amd_sched_job *next;
+
+                spin_unlock(&sched->job_list_lock);
+                cancel_delayed_work_sync(&s_job->work_tdr);
+                spin_lock(&sched->job_list_lock);

                 /* queue TDR for next job */
                 next = list_first_entry_or_null(&sched->ring_mirror_list,
                                                 struct amd_sched_job, node);

-                if (next) {
-                        INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback);
-                        amd_sched_job_get(next);
+                if (next)
                         schedule_delayed_work(&next->work_tdr, sched->timeout);
-                }
         }
+        spin_unlock(&sched->job_list_lock);
+        sched->ops->free_job(s_job);
 }

-void amd_sched_job_begin(struct amd_sched_job *s_job)
+static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+{
+        struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
+                                                 finish_cb);
+        schedule_work(&job->finish_work);
+}
+
+static void amd_sched_job_begin(struct amd_sched_job *s_job)
 {
         struct amd_gpu_scheduler *sched = s_job->sched;

+        spin_lock(&sched->job_list_lock);
+        list_add_tail(&s_job->node, &sched->ring_mirror_list);
         if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-                list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job)
-        {
-                INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback);
-                amd_sched_job_get(s_job);
+            list_first_entry_or_null(&sched->ring_mirror_list,
+                                     struct amd_sched_job, node) == s_job)
+                schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+        spin_unlock(&sched->job_list_lock);
+}
+
+static void amd_sched_job_timedout(struct work_struct *work)
+{
+        struct amd_sched_job *job = container_of(work, struct amd_sched_job,
+                                                 work_tdr.work);
+
+        job->sched->ops->timedout_job(job);
+}
+
+void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
+{
+        struct amd_sched_job *s_job;
+
+        spin_lock(&sched->job_list_lock);
+        list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
+                if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
+                        fence_put(s_job->s_fence->parent);
+                        s_job->s_fence->parent = NULL;
+                }
+        }
+        atomic_set(&sched->hw_rq_count, 0);
+        spin_unlock(&sched->job_list_lock);
+}
+
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
+{
+        struct amd_sched_job *s_job, *tmp;
+        int r;
+
+        spin_lock(&sched->job_list_lock);
+        s_job = list_first_entry_or_null(&sched->ring_mirror_list,
+                                         struct amd_sched_job, node);
+        if (s_job)
                 schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+
+        list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+                struct amd_sched_fence *s_fence = s_job->s_fence;
+                struct fence *fence;
+
+                spin_unlock(&sched->job_list_lock);
+                fence = sched->ops->run_job(s_job);
+                atomic_inc(&sched->hw_rq_count);
+                if (fence) {
+                        s_fence->parent = fence_get(fence);
+                        r = fence_add_callback(fence, &s_fence->cb,
+                                               amd_sched_process_job);
+                        if (r == -ENOENT)
+                                amd_sched_process_job(fence, &s_fence->cb);
+                        else if (r)
+                                DRM_ERROR("fence add callback failed (%d)\n",
+                                          r);
+                        fence_put(fence);
+                } else {
+                        DRM_ERROR("Failed to run job!\n");
+                        amd_sched_process_job(NULL, &s_fence->cb);
+                }
+                spin_lock(&sched->job_list_lock);
         }
+        spin_unlock(&sched->job_list_lock);
 }

 /**
@@ -372,36 +445,29 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 {
         struct amd_sched_entity *entity = sched_job->s_entity;

-        sched_job->use_sched = 1;
-        fence_add_callback(&sched_job->s_fence->base,
-                           &sched_job->cb_free_job, amd_sched_free_job);
         trace_amd_sched_job(sched_job);
+        fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
+                           amd_sched_job_finish_cb);
         wait_event(entity->sched->job_scheduled, amd_sched_entity_in(sched_job));
 }

 /* init a sched_job with basic field */
 int amd_sched_job_init(struct amd_sched_job *job,
-                       struct amd_gpu_scheduler *sched,
-                       struct amd_sched_entity *entity,
-                       void (*timeout_cb)(struct work_struct *work),
-                       void (*free_cb)(struct kref *refcount),
-                       void *owner, struct fence **fence)
+                       struct amd_gpu_scheduler *sched,
+                       struct amd_sched_entity *entity,
+                       void *owner)
 {
-        INIT_LIST_HEAD(&job->node);
-        kref_init(&job->refcount);
         job->sched = sched;
         job->s_entity = entity;
         job->s_fence = amd_sched_fence_create(entity, owner);
         if (!job->s_fence)
                 return -ENOMEM;

-        job->s_fence->s_job = job;
-        job->timeout_callback = timeout_cb;
-        job->free_callback = free_cb;
+        INIT_WORK(&job->finish_work, amd_sched_job_finish);
+        INIT_LIST_HEAD(&job->node);
+        INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

-        if (fence)
-                *fence = &job->s_fence->base;
         return 0;
 }

@@ -450,23 +516,25 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
         struct amd_sched_fence *s_fence =
                 container_of(cb, struct amd_sched_fence, cb);
         struct amd_gpu_scheduler *sched = s_fence->sched;
-        unsigned long flags;

         atomic_dec(&sched->hw_rq_count);
-
-        /* remove job from ring_mirror_list */
-        spin_lock_irqsave(&sched->job_list_lock, flags);
-        list_del_init(&s_fence->s_job->node);
-        sched->ops->finish_job(s_fence->s_job);
-        spin_unlock_irqrestore(&sched->job_list_lock, flags);
-
-        amd_sched_fence_signal(s_fence);
+        amd_sched_fence_finished(s_fence);

         trace_amd_sched_process_job(s_fence);
-        fence_put(&s_fence->base);
+        fence_put(&s_fence->finished);
         wake_up_interruptible(&sched->wake_up_worker);
 }

+static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
+{
+        if (kthread_should_park()) {
+                kthread_parkme();
+                return true;
+        }
+
+        return false;
+}
+
 static int amd_sched_main(void *param)
 {
         struct sched_param sparam = {.sched_priority = 1};
@@ -476,14 +544,15 @@ static int amd_sched_main(void *param)
         sched_setscheduler(current, SCHED_FIFO, &sparam);

         while (!kthread_should_stop()) {
-                struct amd_sched_entity *entity;
+                struct amd_sched_entity *entity = NULL;
                 struct amd_sched_fence *s_fence;
                 struct amd_sched_job *sched_job;
                 struct fence *fence;

                 wait_event_interruptible(sched->wake_up_worker,
-                        (entity = amd_sched_select_entity(sched)) ||
-                        kthread_should_stop());
+                                         (!amd_sched_blocked(sched) &&
+                                          (entity = amd_sched_select_entity(sched))) ||
+                                         kthread_should_stop());

                 if (!entity)
                         continue;
@@ -495,16 +564,19 @@ static int amd_sched_main(void *param)
                 s_fence = sched_job->s_fence;

                 atomic_inc(&sched->hw_rq_count);
-                amd_sched_job_pre_schedule(sched, sched_job);
+                amd_sched_job_begin(sched_job);
+
                 fence = sched->ops->run_job(sched_job);
                 amd_sched_fence_scheduled(s_fence);
                 if (fence) {
+                        s_fence->parent = fence_get(fence);
                         r = fence_add_callback(fence, &s_fence->cb,
                                                amd_sched_process_job);
                         if (r == -ENOENT)
                                 amd_sched_process_job(fence, &s_fence->cb);
                         else if (r)
-                                DRM_ERROR("fence add callback failed (%d)\n", r);
+                                DRM_ERROR("fence add callback failed (%d)\n",
+                                          r);
                         fence_put(fence);
                 } else {
                         DRM_ERROR("Failed to run job!\n");
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 070095a..7cbbbfb 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -27,8 +27,6 @@
 #include <linux/kfifo.h>
 #include <linux/fence.h>

-#define AMD_SCHED_FENCE_SCHEDULED_BIT   FENCE_FLAG_USER_BITS
-
 struct amd_gpu_scheduler;
 struct amd_sched_rq;

@@ -68,36 +66,34 @@ struct amd_sched_rq {
 };

 struct amd_sched_fence {
-        struct fence                    base;
+        struct fence                    scheduled;
+        struct fence                    finished;
         struct fence_cb                 cb;
-        struct list_head                scheduled_cb;
+        struct fence                    *parent;
         struct amd_gpu_scheduler        *sched;
         spinlock_t                      lock;
         void                            *owner;
-        struct amd_sched_job            *s_job;
 };

 struct amd_sched_job {
-        struct kref                     refcount;
         struct amd_gpu_scheduler        *sched;
         struct amd_sched_entity         *s_entity;
         struct amd_sched_fence          *s_fence;
-        bool                            use_sched;      /* true if the job goes to scheduler */
-        struct fence_cb                 cb_free_job;
-        struct work_struct              work_free_job;
-        struct list_head                node;
-        struct delayed_work             work_tdr;
-        void (*timeout_callback) (struct work_struct *work);
-        void (*free_callback)(struct kref *refcount);
+        struct fence_cb                 finish_cb;
+        struct work_struct              finish_work;
+        struct list_head                node;
+        struct delayed_work             work_tdr;
 };

-extern const struct fence_ops amd_sched_fence_ops;
+extern const struct fence_ops amd_sched_fence_ops_scheduled;
+extern const struct fence_ops amd_sched_fence_ops_finished;

 static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
 {
-        struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+        if (f->ops == &amd_sched_fence_ops_scheduled)
+                return container_of(f, struct amd_sched_fence, scheduled);

-        if (__f->base.ops == &amd_sched_fence_ops)
-                return __f;
+        if (f->ops == &amd_sched_fence_ops_finished)
+                return container_of(f, struct amd_sched_fence, finished);

         return NULL;
 }
@@ -109,8 +105,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
 struct amd_sched_backend_ops {
         struct fence *(*dependency)(struct amd_sched_job *sched_job);
         struct fence *(*run_job)(struct amd_sched_job *sched_job);
-        void (*begin_job)(struct amd_sched_job *sched_job);
-        void (*finish_job)(struct amd_sched_job *sched_job);
+        void (*timedout_job)(struct amd_sched_job *sched_job);
+        void (*free_job)(struct amd_sched_job *sched_job);
 };

 enum amd_sched_priority {
@@ -152,25 +148,11 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
 struct amd_sched_fence *amd_sched_fence_create(
         struct amd_sched_entity *s_entity, void *owner);
 void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
-void amd_sched_fence_signal(struct amd_sched_fence *fence);
+void amd_sched_fence_finished(struct amd_sched_fence *fence);
 int amd_sched_job_init(struct amd_sched_job *job,
-                       struct amd_gpu_scheduler *sched,
-                       struct amd_sched_entity *entity,
-                       void (*timeout_cb)(struct work_struct *work),
-                       void (*free_cb)(struct kref* refcount),
-                       void *owner, struct fence **fence);
-void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
-                                struct amd_sched_job *s_job);
-void amd_sched_job_finish(struct amd_sched_job *s_job);
-void amd_sched_job_begin(struct amd_sched_job *s_job);
-static inline void amd_sched_job_get(struct amd_sched_job *job) {
-        if (job)
-                kref_get(&job->refcount);
-}
-
-static inline void amd_sched_job_put(struct amd_sched_job *job) {
-        if (job)
-                kref_put(&job->refcount, job->free_callback);
-}
-
+                       struct amd_gpu_scheduler *sched,
+                       struct amd_sched_entity *entity,
+                       void *owner);
+void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
 #endif
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 2a732c4..6b63bea 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -27,7 +27,8 @@
 #include <drm/drmP.h>
 #include "gpu_scheduler.h"

-struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner)
+struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
+                                               void *owner)
 {
         struct amd_sched_fence *fence = NULL;
         unsigned seq;
@@ -36,46 +37,37 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
         if (fence == NULL)
                 return NULL;

-        INIT_LIST_HEAD(&fence->scheduled_cb);
         fence->owner = owner;
-        fence->sched = s_entity->sched;
+        fence->sched = entity->sched;
         spin_lock_init(&fence->lock);

-        seq = atomic_inc_return(&s_entity->fence_seq);
-        fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
-                   s_entity->fence_context, seq);
+        seq = atomic_inc_return(&entity->fence_seq);
+        fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
+                   &fence->lock, entity->fence_context, seq);
+        fence_init(&fence->finished, &amd_sched_fence_ops_finished,
+                   &fence->lock, entity->fence_context + 1, seq);

         return fence;
 }

-void amd_sched_fence_signal(struct amd_sched_fence *fence)
+void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
 {
-        int ret = fence_signal(&fence->base);
+        int ret = fence_signal(&fence->scheduled);
+
         if (!ret)
-                FENCE_TRACE(&fence->base, "signaled from irq context\n");
+                FENCE_TRACE(&fence->scheduled, "signaled from irq context\n");
         else
-                FENCE_TRACE(&fence->base, "was already signaled\n");
-}
-
-void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
-                                struct amd_sched_job *s_job)
-{
-        unsigned long flags;
-        spin_lock_irqsave(&sched->job_list_lock, flags);
-        list_add_tail(&s_job->node, &sched->ring_mirror_list);
-        sched->ops->begin_job(s_job);
-        spin_unlock_irqrestore(&sched->job_list_lock, flags);
+                FENCE_TRACE(&fence->scheduled, "was already signaled\n");
 }

-void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
+void amd_sched_fence_finished(struct amd_sched_fence *fence)
 {
-        struct fence_cb *cur, *tmp;
+        int ret = fence_signal(&fence->finished);

-        set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags);
-        list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) {
-                list_del_init(&cur->node);
-                cur->func(&s_fence->base, cur);
-        }
+        if (!ret)
+                FENCE_TRACE(&fence->finished, "signaled from irq context\n");
+        else
+                FENCE_TRACE(&fence->finished, "was already signaled\n");
 }

 static const char *amd_sched_fence_get_driver_name(struct fence *fence)
@@ -105,6 +97,8 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
 {
         struct fence *f = container_of(rcu, struct fence, rcu);
         struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+        fence_put(fence->parent);
         kmem_cache_free(sched_fence_slab, fence);
 }

@@ -116,16 +110,41 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
  * This function is called when the reference count becomes zero.
  * It just RCU schedules freeing up the fence.
  */
-static void amd_sched_fence_release(struct fence *f)
+static void amd_sched_fence_release_scheduled(struct fence *f)
 {
-        call_rcu(&f->rcu, amd_sched_fence_free);
+        struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+        call_rcu(&fence->finished.rcu, amd_sched_fence_free);
 }

-const struct fence_ops amd_sched_fence_ops = {
+/**
+ * amd_sched_fence_release_scheduled - drop extra reference
+ *
+ * @f: fence
+ *
+ * Drop the extra reference from the scheduled fence to the base fence.
+ */
+static void amd_sched_fence_release_finished(struct fence *f)
+{
+        struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+        fence_put(&fence->scheduled);
+}
+
+const struct fence_ops amd_sched_fence_ops_scheduled = {
+        .get_driver_name = amd_sched_fence_get_driver_name,
+        .get_timeline_name = amd_sched_fence_get_timeline_name,
+        .enable_signaling = amd_sched_fence_enable_signaling,
+        .signaled = NULL,
+        .wait = fence_default_wait,
+        .release = amd_sched_fence_release_scheduled,
+};
+
+const struct fence_ops amd_sched_fence_ops_finished = {
         .get_driver_name = amd_sched_fence_get_driver_name,
         .get_timeline_name = amd_sched_fence_get_timeline_name,
         .enable_signaling = amd_sched_fence_enable_signaling,
         .signaled = NULL,
         .wait = fence_default_wait,
-        .release = amd_sched_fence_release,
+        .release = amd_sched_fence_release_finished,
 };
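For orientation, a minimal sketch of how a driver backend might sit on the reworked scheduler interface above: the per-job begin_job/finish_job hooks are replaced by timedout_job/free_job in amd_sched_backend_ops, and amd_sched_job_init() now only takes sched, entity and owner. This is illustrative only and not part of the commit; all my_* names are hypothetical.

/* Illustrative sketch, not from this series. The amd_sched_* calls and the
 * amd_sched_backend_ops members are the ones introduced in the diff above;
 * everything prefixed my_ is a hypothetical driver-side helper. */
#include <linux/slab.h>
#include "gpu_scheduler.h"

struct my_job {
        struct amd_sched_job base;      /* the scheduler job must be embedded */
        /* ... driver-specific state ... */
};

/* hypothetical driver helpers, assumed to exist elsewhere */
struct fence *my_hw_submit(struct my_job *job);
void my_gpu_reset(struct amd_gpu_scheduler *sched);

static struct fence *my_dependency(struct amd_sched_job *sched_job)
{
        return NULL;    /* no extra dependencies in this sketch */
}

static struct fence *my_run_job(struct amd_sched_job *sched_job)
{
        /* push the job to the hardware and return its hw fence (or NULL) */
        return my_hw_submit(container_of(sched_job, struct my_job, base));
}

static void my_timedout_job(struct amd_sched_job *sched_job)
{
        /* runs from the job's work_tdr delayed work after sched->timeout;
         * a driver would typically call amd_sched_hw_job_reset(), reset the
         * hardware, then amd_sched_job_recovery() around this point */
        my_gpu_reset(sched_job->sched);
}

static void my_free_job(struct amd_sched_job *sched_job)
{
        /* called from amd_sched_job_finish() once the finished fence signaled */
        kfree(container_of(sched_job, struct my_job, base));
}

static const struct amd_sched_backend_ops my_sched_ops = {
        .dependency   = my_dependency,
        .run_job      = my_run_job,
        .timedout_job = my_timedout_job,
        .free_job     = my_free_job,
};

/* Submission path: initialise the job against an entity, then push it;
 * cleanup now happens through free_job instead of a caller-provided kref. */
static int my_submit(struct amd_gpu_scheduler *sched,
                     struct amd_sched_entity *entity,
                     struct my_job *job, void *owner)
{
        int r = amd_sched_job_init(&job->base, sched, entity, owner);

        if (r)
                return r;
        amd_sched_entity_push_job(&job->base);
        return 0;
}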