Diffstat (limited to 'drivers/gpu/drm/radeon/r600.c')
-rw-r--r--  drivers/gpu/drm/radeon/r600.c  420
1 file changed, 219 insertions(+), 201 deletions(-)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2e4e60e..278f646 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -65,16 +65,11 @@ MODULE_FIRMWARE("radeon/RV710_me.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
-/* This files gather functions specifics to:
- * r600,rv610,rv630,rv620,rv635,rv670
- *
- * Some of these functions might be used by newer ASICs.
- */
+/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
-
/*
* R600 PCIE GART
*/
@@ -168,7 +163,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
@@ -225,6 +220,40 @@ void r600_pcie_gart_fini(struct radeon_device *rdev)
radeon_gart_fini(rdev);
}
+void r600_agp_enable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int i;
+
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
+ /* Setup TLB control */
+ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
+ ENABLE_WAIT_L2_QUERY;
+ WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
+ WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+ WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+ for (i = 0; i < 7; i++)
+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+}
+
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
@@ -240,14 +269,9 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-static void r600_mc_resume(struct radeon_device *rdev)
+static void r600_mc_program(struct radeon_device *rdev)
{
- u32 d1vga_control, d2vga_control;
- u32 vga_render_control, vga_hdp_control;
- u32 d1crtc_control, d2crtc_control;
- u32 new_d1grph_primary, new_d1grph_secondary;
- u32 new_d2grph_primary, new_d2grph_secondary;
- u64 old_vram_start;
+ struct rv515_mc_save save;
u32 tmp;
int i, j;
@@ -261,85 +285,51 @@ static void r600_mc_resume(struct radeon_device *rdev)
}
WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
- d1vga_control = RREG32(D1VGA_CONTROL);
- d2vga_control = RREG32(D2VGA_CONTROL);
- vga_render_control = RREG32(VGA_RENDER_CONTROL);
- vga_hdp_control = RREG32(VGA_HDP_CONTROL);
- d1crtc_control = RREG32(D1CRTC_CONTROL);
- d2crtc_control = RREG32(D2CRTC_CONTROL);
- old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
- new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS);
- new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS);
- new_d1grph_primary += rdev->mc.vram_start - old_vram_start;
- new_d1grph_secondary += rdev->mc.vram_start - old_vram_start;
- new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS);
- new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS);
- new_d2grph_primary += rdev->mc.vram_start - old_vram_start;
- new_d2grph_secondary += rdev->mc.vram_start - old_vram_start;
-
- /* Stop all video */
- WREG32(D1VGA_CONTROL, 0);
- WREG32(D2VGA_CONTROL, 0);
- WREG32(VGA_RENDER_CONTROL, 0);
- WREG32(D1CRTC_UPDATE_LOCK, 1);
- WREG32(D2CRTC_UPDATE_LOCK, 1);
- WREG32(D1CRTC_CONTROL, 0);
- WREG32(D2CRTC_CONTROL, 0);
- WREG32(D1CRTC_UPDATE_LOCK, 0);
- WREG32(D2CRTC_UPDATE_LOCK, 0);
-
- mdelay(1);
+ rv515_mc_stop(rdev, &save);
if (r600_mc_wait_for_idle(rdev)) {
- printk(KERN_WARNING "[drm] MC not idle !\n");
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
-
- /* Lockout access through VGA aperture*/
+ /* Lockout access through VGA aperture (doesn't exist before R600) */
WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
-
/* Update configuration */
- WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
- WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12);
+ if (rdev->flags & RADEON_IS_AGP) {
+ if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+ /* VRAM before AGP */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.gtt_end >> 12);
+ } else {
+ /* VRAM after AGP */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.gtt_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.vram_end >> 12);
+ }
+ } else {
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
+ }
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
- tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16;
+ tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
WREG32(MC_VM_FB_LOCATION, tmp);
WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
WREG32(HDP_NONSURFACE_INFO, (2 << 7));
- WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
+ WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
if (rdev->flags & RADEON_IS_AGP) {
- WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16);
- WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
+ WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
+ WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
} else {
WREG32(MC_VM_AGP_BASE, 0);
WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
}
- WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary);
- WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary);
- WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary);
- WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary);
- WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
-
- /* Unlock host access */
- WREG32(VGA_HDP_CONTROL, vga_hdp_control);
-
- mdelay(1);
if (r600_mc_wait_for_idle(rdev)) {
- printk(KERN_WARNING "[drm] MC not idle !\n");
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
-
- /* Restore video state */
- WREG32(D1CRTC_UPDATE_LOCK, 1);
- WREG32(D2CRTC_UPDATE_LOCK, 1);
- WREG32(D1CRTC_CONTROL, d1crtc_control);
- WREG32(D2CRTC_CONTROL, d2crtc_control);
- WREG32(D1CRTC_UPDATE_LOCK, 0);
- WREG32(D2CRTC_UPDATE_LOCK, 0);
- WREG32(D1VGA_CONTROL, d1vga_control);
- WREG32(D2VGA_CONTROL, d2vga_control);
- WREG32(VGA_RENDER_CONTROL, vga_render_control);
-
+ rv515_mc_resume(rdev, &save);
/* we need to own VRAM, so turn off the VGA renderer here
* to stop it overwriting our objects */
rv515_vga_render_disable(rdev);
@@ -349,11 +339,10 @@ int r600_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
u32 tmp;
- int chansize;
+ int chansize, numchan;
int r;
/* Get VRAM informations */
- rdev->mc.vram_width = 128;
rdev->mc.vram_is_ddr = true;
tmp = RREG32(RAMCFG);
if (tmp & CHANSIZE_OVERRIDE) {
@@ -363,17 +352,23 @@ int r600_mc_init(struct radeon_device *rdev)
} else {
chansize = 32;
}
- if (rdev->family == CHIP_R600) {
- rdev->mc.vram_width = 8 * chansize;
- } else if (rdev->family == CHIP_RV670) {
- rdev->mc.vram_width = 4 * chansize;
- } else if ((rdev->family == CHIP_RV610) ||
- (rdev->family == CHIP_RV620)) {
- rdev->mc.vram_width = chansize;
- } else if ((rdev->family == CHIP_RV630) ||
- (rdev->family == CHIP_RV635)) {
- rdev->mc.vram_width = 2 * chansize;
+ tmp = RREG32(CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
}
+ rdev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
@@ -414,40 +409,34 @@ int r600_mc_init(struct radeon_device *rdev)
rdev->mc.gtt_location = rdev->mc.mc_vram_size;
}
} else {
- if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
- rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
- 0xFFFF) << 24;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
- tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
- if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
- /* Enough place after vram */
- rdev->mc.gtt_location = tmp;
- } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
- /* Enough place before vram */
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
+ 0xFFFF) << 24;
+ tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
+ if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
+ /* Enough place after vram */
+ rdev->mc.gtt_location = tmp;
+ } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
+ /* Enough place before vram */
+ rdev->mc.gtt_location = 0;
+ } else {
+ /* Not enough place after or before shrink
+ * gart size
+ */
+ if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
rdev->mc.gtt_location = 0;
+ rdev->mc.gtt_size = rdev->mc.vram_location;
} else {
- /* Not enough place after or before shrink
- * gart size
- */
- if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
- rdev->mc.gtt_location = 0;
- rdev->mc.gtt_size = rdev->mc.vram_location;
- } else {
- rdev->mc.gtt_location = tmp;
- rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
- }
+ rdev->mc.gtt_location = tmp;
+ rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
}
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- } else {
- rdev->mc.vram_location = 0x00000000UL;
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
}
+ rdev->mc.gtt_location = rdev->mc.mc_vram_size;
}
rdev->mc.vram_start = rdev->mc.vram_location;
- rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size;
+ rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
rdev->mc.gtt_start = rdev->mc.gtt_location;
- rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size;
+ rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
@@ -463,6 +452,7 @@ int r600_mc_init(struct radeon_device *rdev)
*/
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
+ struct rv515_mc_save save;
u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
@@ -480,13 +470,25 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
u32 srbm_reset = 0;
+ u32 tmp;
+ dev_info(rdev->dev, "GPU softreset \n");
+ dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
+ RREG32(R_008010_GRBM_STATUS));
+ dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
+ RREG32(R_008014_GRBM_STATUS2));
+ dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
+ RREG32(R_000E50_SRBM_STATUS));
+ rv515_mc_stop(rdev, &save);
+ if (r600_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
/* Disable CP parsing/prefetching */
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
/* Check if any of the rendering block is busy and reset it */
if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
(RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
- WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CR(1) |
+ tmp = S_008020_SOFT_RESET_CR(1) |
S_008020_SOFT_RESET_DB(1) |
S_008020_SOFT_RESET_CB(1) |
S_008020_SOFT_RESET_PA(1) |
@@ -498,14 +500,18 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
S_008020_SOFT_RESET_TC(1) |
S_008020_SOFT_RESET_TA(1) |
S_008020_SOFT_RESET_VC(1) |
- S_008020_SOFT_RESET_VGT(1));
+ S_008020_SOFT_RESET_VGT(1);
+ dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(R_008020_GRBM_SOFT_RESET, tmp);
(void)RREG32(R_008020_GRBM_SOFT_RESET);
udelay(50);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
(void)RREG32(R_008020_GRBM_SOFT_RESET);
}
/* Reset CP (we always reset CP) */
- WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CP(1));
+ tmp = S_008020_SOFT_RESET_CP(1);
+ dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(R_008020_GRBM_SOFT_RESET, tmp);
(void)RREG32(R_008020_GRBM_SOFT_RESET);
udelay(50);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
@@ -533,6 +539,14 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
+ if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
+ srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
+ dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
+ WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
+ (void)RREG32(R_000E60_SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(R_000E60_SRBM_SOFT_RESET, 0);
+ (void)RREG32(R_000E60_SRBM_SOFT_RESET);
WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
(void)RREG32(R_000E60_SRBM_SOFT_RESET);
udelay(50);
@@ -540,6 +554,17 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
(void)RREG32(R_000E60_SRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
+ dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
+ RREG32(R_008010_GRBM_STATUS));
+ dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
+ RREG32(R_008014_GRBM_STATUS2));
+ dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
+ RREG32(R_000E50_SRBM_STATUS));
+ /* After reset we need to reinit the asic as GPU often endup in an
+ * incoherent state.
+ */
+ atom_asic_init(rdev->mode_info.atom_context);
+ rv515_mc_resume(rdev, &save);
return 0;
}
@@ -833,7 +858,8 @@ void r600_gpu_init(struct radeon_device *rdev)
((rdev->family) == CHIP_RV630) ||
((rdev->family) == CHIP_RV610) ||
((rdev->family) == CHIP_RV620) ||
- ((rdev->family) == CHIP_RS780)) {
+ ((rdev->family) == CHIP_RS780) ||
+ ((rdev->family) == CHIP_RS880)) {
WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
} else {
WREG32(DB_DEBUG, 0);
@@ -850,7 +876,8 @@ void r600_gpu_init(struct radeon_device *rdev)
tmp = RREG32(SQ_MS_FIFO_SIZES);
if (((rdev->family) == CHIP_RV610) ||
((rdev->family) == CHIP_RV620) ||
- ((rdev->family) == CHIP_RS780)) {
+ ((rdev->family) == CHIP_RS780) ||
+ ((rdev->family) == CHIP_RS880)) {
tmp = (CACHE_FIFO_SIZE(0xa) |
FETCH_FIFO_HIWATER(0xa) |
DONE_FIFO_HIWATER(0xe0) |
@@ -893,7 +920,8 @@ void r600_gpu_init(struct radeon_device *rdev)
NUM_ES_STACK_ENTRIES(0));
} else if (((rdev->family) == CHIP_RV610) ||
((rdev->family) == CHIP_RV620) ||
- ((rdev->family) == CHIP_RS780)) {
+ ((rdev->family) == CHIP_RS780) ||
+ ((rdev->family) == CHIP_RS880)) {
/* no vertex cache */
sq_config &= ~VC_ENABLE;
@@ -950,7 +978,8 @@ void r600_gpu_init(struct radeon_device *rdev)
if (((rdev->family) == CHIP_RV610) ||
((rdev->family) == CHIP_RV620) ||
- ((rdev->family) == CHIP_RS780)) {
+ ((rdev->family) == CHIP_RS780) ||
+ ((rdev->family) == CHIP_RS880)) {
WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
} else {
WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
@@ -976,8 +1005,9 @@ void r600_gpu_init(struct radeon_device *rdev)
tmp = rdev->config.r600.max_pipes * 16;
switch (rdev->family) {
case CHIP_RV610:
- case CHIP_RS780:
case CHIP_RV620:
+ case CHIP_RS780:
+ case CHIP_RS880:
tmp += 32;
break;
case CHIP_RV670:
@@ -1018,8 +1048,9 @@ void r600_gpu_init(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_RV610:
- case CHIP_RS780:
case CHIP_RV620:
+ case CHIP_RS780:
+ case CHIP_RS880:
tmp = TC_L2_SIZE(8);
break;
case CHIP_RV630:
@@ -1241,19 +1272,17 @@ int r600_cp_resume(struct radeon_device *rdev)
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
- WREG32(CP_RB_CNTL, BUF_SWAP_32BIT | RB_NO_UPDATE |
- (drm_order(4096/8) << 8) | rb_bufsz);
-#else
- WREG32(CP_RB_CNTL, RB_NO_UPDATE | (drm_order(4096/8) << 8) | rb_bufsz);
+ tmp |= BUF_SWAP_32BIT;
#endif
+ WREG32(CP_RB_CNTL, tmp);
WREG32(CP_SEM_WAIT_TIMER, 0x4);
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
/* Initialize the ring buffer's read and write pointers */
- tmp = RREG32(CP_RB_CNTL);
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
WREG32(CP_RB_WPTR, 0);
@@ -1350,32 +1379,47 @@ int r600_ring_test(struct radeon_device *rdev)
return r;
}
-/*
- * Writeback
- */
-int r600_wb_init(struct radeon_device *rdev)
+void r600_wb_disable(struct radeon_device *rdev)
+{
+ WREG32(SCRATCH_UMSK, 0);
+ if (rdev->wb.wb_obj) {
+ radeon_object_kunmap(rdev->wb.wb_obj);
+ radeon_object_unpin(rdev->wb.wb_obj);
+ }
+}
+
+void r600_wb_fini(struct radeon_device *rdev)
+{
+ r600_wb_disable(rdev);
+ if (rdev->wb.wb_obj) {
+ radeon_object_unref(&rdev->wb.wb_obj);
+ rdev->wb.wb = NULL;
+ rdev->wb.wb_obj = NULL;
+ }
+}
+
+int r600_wb_enable(struct radeon_device *rdev)
{
int r;
if (rdev->wb.wb_obj == NULL) {
- r = radeon_object_create(rdev, NULL, 4096,
- true,
- RADEON_GEM_DOMAIN_GTT,
- false, &rdev->wb.wb_obj);
+ r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
if (r) {
- DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
+ dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
return r;
}
- r = radeon_object_pin(rdev->wb.wb_obj,
- RADEON_GEM_DOMAIN_GTT,
- &rdev->wb.gpu_addr);
+ r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+ &rdev->wb.gpu_addr);
if (r) {
- DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
+ dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
+ r600_wb_fini(rdev);
return r;
}
r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
if (r) {
- DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
+ dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
+ r600_wb_fini(rdev);
return r;
}
}
@@ -1386,21 +1430,6 @@ int r600_wb_init(struct radeon_device *rdev)
return 0;
}
-void r600_wb_fini(struct radeon_device *rdev)
-{
- if (rdev->wb.wb_obj) {
- radeon_object_kunmap(rdev->wb.wb_obj);
- radeon_object_unpin(rdev->wb.wb_obj);
- radeon_object_unref(&rdev->wb.wb_obj);
- rdev->wb.wb = NULL;
- rdev->wb.wb_obj = NULL;
- }
-}
-
-
-/*
- * CS
- */
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
@@ -1424,8 +1453,8 @@ int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence)
{
- r600_blit_prepare_copy(rdev, num_pages * 4096);
- r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * 4096);
+ r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+ r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
r600_blit_done_copy(rdev, fence);
return 0;
}
@@ -1477,11 +1506,14 @@ int r600_startup(struct radeon_device *rdev)
{
int r;
- r600_gpu_reset(rdev);
- r600_mc_resume(rdev);
- r = r600_pcie_gart_enable(rdev);
- if (r)
- return r;
+ r600_mc_program(rdev);
+ if (rdev->flags & RADEON_IS_AGP) {
+ r600_agp_enable(rdev);
+ } else {
+ r = r600_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ }
r600_gpu_init(rdev);
r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
@@ -1500,9 +1532,8 @@ int r600_startup(struct radeon_device *rdev)
r = r600_cp_resume(rdev);
if (r)
return r;
- r = r600_wb_init(rdev);
- if (r)
- return r;
+ /* write back buffer are not vital so don't worry about failure */
+ r600_wb_enable(rdev);
return 0;
}
@@ -1524,15 +1555,12 @@ int r600_resume(struct radeon_device *rdev)
{
int r;
- if (radeon_gpu_reset(rdev)) {
- /* FIXME: what do we want to do here ? */
- }
+ /* Do not reset GPU before posting, on r600 hw unlike on r500 hw,
+ * posting will perform necessary task to bring back GPU into good
+ * shape.
+ */
/* post card */
- if (rdev->is_atom_bios) {
- atom_asic_init(rdev->mode_info.atom_context);
- } else {
- radeon_combios_asic_init(rdev->ddev);
- }
+ atom_asic_init(rdev->mode_info.atom_context);
/* Initialize clocks */
r = radeon_clocks_init(rdev);
if (r) {
@@ -1545,7 +1573,7 @@ int r600_resume(struct radeon_device *rdev)
return r;
}
- r = radeon_ib_test(rdev);
+ r = r600_ib_test(rdev);
if (r) {
DRM_ERROR("radeon: failled testing IB (%d).\n", r);
return r;
@@ -1553,13 +1581,12 @@ int r600_resume(struct radeon_device *rdev)
return r;
}
-
int r600_suspend(struct radeon_device *rdev)
{
/* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
rdev->cp.ready = false;
-
+ r600_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
/* unpin shaders bo */
radeon_object_unpin(rdev->r600_blit.shader_obj);
@@ -1576,7 +1603,6 @@ int r600_init(struct radeon_device *rdev)
{
int r;
- rdev->new_init_path = true;
r = radeon_dummy_page_init(rdev);
if (r)
return r;
@@ -1593,8 +1619,10 @@ int r600_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Must be an ATOMBIOS */
- if (!rdev->is_atom_bios)
+ if (!rdev->is_atom_bios) {
+ dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
return -EINVAL;
+ }
r = radeon_atombios_init(rdev);
if (r)
return r;
@@ -1607,24 +1635,20 @@ int r600_init(struct radeon_device *rdev)
r600_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+ /* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
r = radeon_clocks_init(rdev);
if (r)
return r;
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
return r;
r = r600_mc_init(rdev);
- if (r) {
- if (rdev->flags & RADEON_IS_AGP) {
- /* Retry with disabling AGP */
- r600_fini(rdev);
- rdev->flags &= ~RADEON_IS_AGP;
- return r600_init(rdev);
- }
+ if (r)
return r;
- }
/* Memory manager */
r = radeon_object_init(rdev);
if (r)
@@ -1653,12 +1677,10 @@ int r600_init(struct radeon_device *rdev)
r = r600_startup(rdev);
if (r) {
- if (rdev->flags & RADEON_IS_AGP) {
- /* Retry with disabling AGP */
- r600_fini(rdev);
- rdev->flags &= ~RADEON_IS_AGP;
- return r600_init(rdev);
- }
+ r600_suspend(rdev);
+ r600_wb_fini(rdev);
+ radeon_ring_fini(rdev);
+ r600_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
@@ -1667,7 +1689,7 @@ int r600_init(struct radeon_device *rdev)
DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
rdev->accel_working = false;
}
- r = radeon_ib_test(rdev);
+ r = r600_ib_test(rdev);
if (r) {
DRM_ERROR("radeon: failled testing IB (%d).\n", r);
rdev->accel_working = false;
@@ -1683,19 +1705,15 @@ void r600_fini(struct radeon_device *rdev)
r600_blit_fini(rdev);
radeon_ring_fini(rdev);
+ r600_wb_fini(rdev);
r600_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_clocks_fini(rdev);
-#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP)
radeon_agp_fini(rdev);
-#endif
radeon_object_fini(rdev);
- if (rdev->is_atom_bios)
- radeon_atombios_fini(rdev);
- else
- radeon_combios_fini(rdev);
+ radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
radeon_dummy_page_fini(rdev);