Diffstat (limited to 'drivers/gpu/drm')
25 files changed, 272 insertions, 67 deletions
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 6977a1c..f73ef43 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -672,7 +672,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
 	struct drm_crtc_helper_funcs *crtc_funcs;
 	u16 *red, *green, *blue, *transp;
 	struct drm_crtc *crtc;
-	int i, rc = 0;
+	int i, j, rc = 0;
 	int start;
 
 	for (i = 0; i < fb_helper->crtc_count; i++) {
@@ -685,7 +685,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
 		transp = cmap->transp;
 		start = cmap->start;
 
-		for (i = 0; i < cmap->len; i++) {
+		for (j = 0; j < cmap->len; j++) {
 			u16 hred, hgreen, hblue, htransp = 0xffff;
 
 			hred = *red++;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3dadfa2..28d1d3c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -164,8 +164,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	 * available. In that case we can't account for this and just
 	 * hope for the best.
 	 */
-	if ((vblrc > 0) && (abs(diff_ns) > 1000000))
+	if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
 		atomic_inc(&dev->_vblank_count[crtc]);
+		smp_mb__after_atomic_inc();
+	}
 
 	/* Invalidate all timestamps while vblank irq's are off. */
 	clear_vblank_timestamps(dev, crtc);
@@ -491,6 +493,12 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
 	/* Dot clock in Hz: */
 	dotclock = (u64) crtc->hwmode.clock * 1000;
 
+	/* Fields of interlaced scanout modes are only halve a frame duration.
+	 * Double the dotclock to get halve the frame-/line-/pixelduration.
+	 */
+	if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
+		dotclock *= 2;
+
 	/* Valid dotclock? */
 	if (dotclock > 0) {
 		/* Convert scanline length in pixels and video dot clock to
@@ -603,14 +611,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 		return -EAGAIN;
 	}
 
-	/* Don't know yet how to handle interlaced or
-	 * double scan modes. Just no-op for now.
-	 */
-	if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
-		DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
-		return -ENOTSUPP;
-	}
-
 	/* Get current scanout position with system timestamp.
 	 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
 	 * if single query takes longer than max_error nanoseconds.
@@ -858,10 +858,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 	if (rc) {
 		tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
 		vblanktimestamp(dev, crtc, tslot) = t_vblank;
-		smp_wmb();
 	}
 
+	smp_mb__before_atomic_inc();
 	atomic_add(diff, &dev->_vblank_count[crtc]);
+	smp_mb__after_atomic_inc();
 }
 
 /**
@@ -1011,7 +1012,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv)
 {
 	struct drm_modeset_ctl *modeset = data;
-	int crtc, ret = 0;
+	int ret = 0;
+	unsigned int crtc;
 
 	/* If drm_vblank_init() hasn't been called yet, just no-op */
 	if (!dev->num_crtcs)
@@ -1293,15 +1295,16 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
 	 * e.g., due to spurious vblank interrupts. We need to
 	 * ignore those for accounting.
 	 */
-	if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+	if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
 		/* Store new timestamp in ringbuffer. */
 		vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
-		smp_wmb();
 
 		/* Increment cooked vblank count. This also atomically commits
 		 * the timestamp computed above.
 		 */
+		smp_mb__before_atomic_inc();
 		atomic_inc(&dev->_vblank_count[crtc]);
+		smp_mb__after_atomic_inc();
 	} else {
 		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
 			  crtc, (int) diff_ns);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3601466..4ff9b6c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		int max_freq;
 
 		/* RPSTAT1 is in the GT power well */
-		__gen6_force_wake_get(dev_priv);
+		__gen6_gt_force_wake_get(dev_priv);
 
 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
 		seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
@@ -888,7 +888,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 			   max_freq * 100);
 
-		__gen6_force_wake_put(dev_priv);
+		__gen6_gt_force_wake_put(dev_priv);
 	} else {
 		seq_printf(m, "no P-state info available\n");
 	}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 17bd766..e33d9be 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1895,6 +1895,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (IS_GEN2(dev))
 		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
 
+	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
+	 * using 32bit addressing, overwriting memory if HWS is located
+	 * above 4GB.
+	 *
+	 * The documentation also mentions an issue with undefined
+	 * behaviour if any general state is accessed within a page above 4GB,
+	 * which also needs to be handled carefully.
+	 */
+	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
 	mmio_bar = IS_GEN2(dev) ? 1 : 0;
 	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
 	if (!dev_priv->regs) {
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0ad533f..22ec066 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -46,6 +46,9 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 unsigned int i915_powersave = 1;
 module_param_named(powersave, i915_powersave, int, 0600);
 
+unsigned int i915_semaphores = 0;
+module_param_named(semaphores, i915_semaphores, int, 0600);
+
 unsigned int i915_enable_rc6 = 0;
 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
 
@@ -254,7 +257,7 @@ void intel_detect_pch (struct drm_device *dev)
 	}
 }
 
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
 	int count;
 
@@ -270,12 +273,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
 	udelay(10);
 }
 
-void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE_NOTRACE(FORCEWAKE, 0);
 	POSTING_READ(FORCEWAKE);
 }
 
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+	int loop = 500;
+	u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+	while (fifo < 20 && loop--) {
+		udelay(10);
+		fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+	}
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 65dfe81..456f404 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -956,6 +956,7 @@ extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
 extern unsigned int i915_powersave;
+extern unsigned int i915_semaphores;
 extern unsigned int i915_lvds_downclock;
 extern unsigned int i915_panel_use_ssc;
 extern unsigned int i915_enable_rc6;
@@ -1177,6 +1178,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 void i915_gem_free_all_phys_object(struct drm_device *dev);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
+uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
@@ -1353,22 +1357,32 @@ __i915_write(64, q)
  * must be set to prevent GT core from power down and stale values being
  * returned.
  */
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
-void __gen6_force_wake_put (struct drm_i915_private *dev_priv);
-static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+
+static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg)
 {
 	u32 val;
 
 	if (dev_priv->info->gen >= 6) {
-		__gen6_force_wake_get(dev_priv);
+		__gen6_gt_force_wake_get(dev_priv);
 		val = I915_READ(reg);
-		__gen6_force_wake_put(dev_priv);
+		__gen6_gt_force_wake_put(dev_priv);
 	} else
 		val = I915_READ(reg);
 
 	return val;
 }
 
+static inline void i915_gt_write(struct drm_i915_private *dev_priv,
+				 u32 reg, u32 val)
+{
+	if (dev_priv->info->gen >= 6)
+		__gen6_gt_wait_for_fifo(dev_priv);
+	I915_WRITE(reg, val);
+}
+
 static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
 			      u64 val, int len)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cf4f74c..36e66cc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1398,7 +1398,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
  * Return the required GTT alignment for an object, only taking into account
  * unfenced tiled surface requirements.
  */
-static uint32_t
+uint32_t
 i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d2f445e..50ab161 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -772,8 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 	if (from == NULL || to == from)
 		return 0;
 
-	/* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
-	if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
+	/* XXX gpu semaphores are implicated in various hard hangs on SNB */
+	if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
 		return i915_gem_object_wait_rendering(obj, true);
 
 	idx = intel_ring_sync_index(from, to);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9..d64843e 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -349,14 +349,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 			(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
 			 i915_gem_object_fence_ok(obj, args->tiling_mode));
 
-		obj->tiling_changed = true;
-		obj->tiling_mode = args->tiling_mode;
-		obj->stride = args->stride;
+		/* Rebind if we need a change of alignment */
+		if (!obj->map_and_fenceable) {
+			u32 unfenced_alignment =
+				i915_gem_get_unfenced_gtt_alignment(obj);
+			if (obj->gtt_offset & (unfenced_alignment - 1))
+				ret = i915_gem_object_unbind(obj);
+		}
+
+		if (ret == 0) {
+			obj->tiling_changed = true;
+			obj->tiling_mode = args->tiling_mode;
+			obj->stride = args->stride;
+		}
 	}
+	/* we have to maintain this existing ABI... */
+	args->stride = obj->stride;
+	args->tiling_mode = obj->tiling_mode;
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
 
-	return 0;
+	return ret;
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 97f946dc..8a9e08b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -316,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_encoder *encoder;
 
+	DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
 		if (encoder->hot_plug)
 			encoder->hot_plug(encoder);
@@ -1649,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	} else {
 		hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
 			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
-		hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
-		I915_WRITE(FDI_RXA_IMR, 0);
-		I915_WRITE(FDI_RXB_IMR, 0);
+		hotplug_mask |= SDE_AUX_MASK;
 	}
 
 	dev_priv->pch_irq_mask = ~hotplug_mask;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 729d423..2abe240 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1553,7 +1553,17 @@
 
 /* Backlight control */
 #define BLC_PWM_CTL		0x61254
+#define   BACKLIGHT_MODULATION_FREQ_SHIFT	(17)
 #define BLC_PWM_CTL2		0x61250 /* 965+ only */
+#define   BLM_COMBINATION_MODE	(1 << 30)
+/*
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define   BACKLIGHT_MODULATION_FREQ_MASK	(0x7fff << 17)
+#define   BLM_LEGACY_MODE	(1 << 16)
 /*
  * This is the number of cycles out of the backlight modulation cycle for which
  * the backlight is on.
@@ -3261,6 +3271,8 @@
 #define FORCEWAKE				0xA18C
 #define FORCEWAKE_ACK				0x130090
 
+#define GT_FIFO_FREE_ENTRIES			0x120008
+
 #define GEN6_RPNSWREQ				0xA008
 #define GEN6_TURBO_DISABLE			(1<<31)
 #define GEN6_FREQUENCY(x)			((x)<<25)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3b00653..49fb54f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1219,7 +1219,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
 	u32 blt_ecoskpd;
 
 	/* Make sure blitter notifies FBC of writes */
-	__gen6_force_wake_get(dev_priv);
+	__gen6_gt_force_wake_get(dev_priv);
 	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
 	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
 		GEN6_BLITTER_LOCK_SHIFT;
@@ -1230,7 +1230,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
 		 GEN6_BLITTER_LOCK_SHIFT);
 	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
 	POSTING_READ(GEN6_BLITTER_ECOSKPD);
-	__gen6_force_wake_put(dev_priv);
+	__gen6_gt_force_wake_put(dev_priv);
 }
 
 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
@@ -1630,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
 
 		wait_event(dev_priv->pending_flip_queue,
+			   atomic_read(&dev_priv->mm.wedged) ||
 			   atomic_read(&obj->pending_flip) == 0);
 
 		/* Big Hammer, we also need to ensure that any pending
 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
 		 * current scanout is retired before unpinning the old
 		 * framebuffer.
+		 *
+		 * This should only fail upon a hung GPU, in which case we
+		 * can safely continue.
 		 */
 		ret = i915_gem_object_flush_gpu(obj, false);
-		if (ret) {
-			i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
+		(void) ret;
 	}
 
 	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -2045,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 		   atomic_read(&obj->pending_flip) == 0);
 }
 
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *encoder;
+
+	/*
+	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+	 * must be driven by its own crtc; no sharing is possible.
+	 */
+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+		if (encoder->base.crtc != crtc)
+			continue;
+
+		switch (encoder->type) {
+		case INTEL_OUTPUT_EDP:
+			if (!intel_encoder_is_pch_edp(&encoder->base))
+				return false;
+			continue;
+		}
+	}
+
+	return true;
+}
+
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -2053,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 	u32 reg, temp;
+	bool is_pch_port = false;
 
 	if (intel_crtc->active)
 		return;
@@ -2066,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
 	}
 
-	ironlake_fdi_enable(crtc);
+	is_pch_port = intel_crtc_driving_pch(crtc);
+
+	if (is_pch_port)
+		ironlake_fdi_enable(crtc);
+	else {
+		/* disable CPU FDI tx and PCH FDI rx */
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+		POSTING_READ(reg);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~(0x7 << 16);
+		temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+		I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+		POSTING_READ(reg);
+		udelay(100);
+
+		/* Ironlake workaround, disable clock pointer after downing FDI */
+		if (HAS_PCH_IBX(dev))
+			I915_WRITE(FDI_RX_CHICKEN(pipe),
+				   I915_READ(FDI_RX_CHICKEN(pipe) &
+					     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+
+		/* still set train pattern 1 */
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_PATTERN_1;
+		I915_WRITE(reg, temp);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = I915_READ(reg);
+		if (HAS_PCH_CPT(dev)) {
+			temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+			temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+		} else {
+			temp &= ~FDI_LINK_TRAIN_NONE;
+			temp |= FDI_LINK_TRAIN_PATTERN_1;
+		}
+		/* BPC in FDI rx is consistent with that in PIPECONF */
+		temp &= ~(0x07 << 16);
+		temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+		I915_WRITE(reg, temp);
+
+		POSTING_READ(reg);
+		udelay(100);
+	}
 
 	/* Enable panel fitting for LVDS */
 	if (dev_priv->pch_pf_size &&
@@ -2100,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 		intel_flush_display_plane(dev, plane);
 	}
 
+	/* Skip the PCH stuff if possible */
+	if (!is_pch_port)
+		goto done;
+
 	/* For PCH output, training FDI link */
 	if (IS_GEN6(dev))
 		gen6_fdi_link_train(crtc);
@@ -2184,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	I915_WRITE(reg, temp | TRANS_ENABLE);
 	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
 		DRM_ERROR("failed to enable transcoder %d\n", pipe);
-
+done:
 	intel_crtc_load_lut(crtc);
 	intel_update_fbc(dev);
 	intel_crtc_update_cursor(crtc, true);
@@ -6203,7 +6282,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	 * userspace...
 	 */
 	I915_WRITE(GEN6_RC_STATE, 0);
-	__gen6_force_wake_get(dev_priv);
+	__gen6_gt_force_wake_get(dev_priv);
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -6301,7 +6380,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	/* enable all PM interrupts */
 	I915_WRITE(GEN6_PMINTRMSK, 0);
 
-	__gen6_force_wake_put(dev_priv);
+	__gen6_gt_force_wake_put(dev_priv);
 }
 
 void intel_enable_clock_gating(struct drm_device *dev)
@@ -6496,7 +6575,7 @@ static void ironlake_disable_rc6(struct drm_device *dev)
 		POSTING_READ(RSTDBYCTL);
 	}
 
-	ironlake_disable_rc6(dev);
+	ironlake_teardown_rc6(dev);
 }
 
 static int ironlake_setup_rc6(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index d860abe..f8f86e5 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,6 +30,8 @@
 
 #include "intel_drv.h"
 
+#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
+
 void
 intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
 		       struct drm_display_mode *adjusted_mode)
@@ -110,6 +112,19 @@ done:
 	dev_priv->pch_pf_size = (width << 16) | height;
 }
 
+static int is_backlight_combination_mode(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev)->gen >= 4)
+		return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
+
+	if (IS_GEN2(dev))
+		return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
+
+	return 0;
+}
+
 static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
 {
 	u32 val;
@@ -166,6 +181,9 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
 			if (INTEL_INFO(dev)->gen < 4)
 				max &= ~1;
 		}
+
+		if (is_backlight_combination_mode(dev))
+			max *= 0xff;
 	}
 
 	DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
@@ -183,6 +201,14 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
 		val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
 		if (IS_PINEVIEW(dev))
 			val >>= 1;
+
+		if (is_backlight_combination_mode(dev)){
+			u8 lbpc;
+
+			val &= ~1;
+			pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
+			val *= lbpc;
+		}
 	}
 
 	DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
@@ -205,6 +231,16 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
 
 	if (HAS_PCH_SPLIT(dev))
 		return intel_pch_panel_set_backlight(dev, level);
+
+	if (is_backlight_combination_mode(dev)){
+		u32 max = intel_panel_get_max_backlight(dev);
+		u8 lbpc;
+
+		lbpc = level * 0xfe / max + 1;
+		level /= lbpc;
+		pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
+	}
+
 	tmp = I915_READ(BLC_PWM_CTL);
 	if (IS_PINEVIEW(dev)) {
 		tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6d6fde8..3430686 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,22 +14,23 @@ struct intel_hw_status_page {
 	struct drm_i915_gem_object *obj;
 };
 
-#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
+#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg)
+#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val)
 
 #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val)
 
 #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val)
 
 #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val)
 
 #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val)
 
-#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
+#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val)
 
 #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
 #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index d38a4d9..a521840 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
 	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
-	nouveau_vm_put(&nvbo->vma);
+	if (nvbo->vma.node) {
+		nouveau_vm_unmap(&nvbo->vma);
+		nouveau_vm_put(&nvbo->vma);
+	}
 	kfree(nvbo);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 65699bf..b368ed7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -83,7 +83,8 @@ nouveau_dma_init(struct nouveau_channel *chan)
 		return ret;
 
 	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
-	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
+	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
+				     &chan->m2mf_ntfy);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 9821fca..982d70b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -852,7 +852,8 @@ extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
 extern int  nouveau_notifier_init_channel(struct nouveau_channel *);
 extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
 extern int  nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
-				   int cout, uint32_t *offset);
+				   int cout, uint32_t start, uint32_t end,
+				   uint32_t *offset);
 extern int  nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
 extern int  nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
					 struct drm_file *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 26347b7..b0fb9bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -725,8 +725,10 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 	ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
 			mem->page_alignment << PAGE_SHIFT, size_nc,
 			(nvbo->tile_flags >> 8) & 0xff, &node);
-	if (ret)
-		return ret;
+	if (ret) {
+		mem->mm_node = NULL;
+		return (ret == -ENOSPC) ? 0 : ret;
+	}
 
 	node->page_shift = 12;
 	if (nvbo->vma.node)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index 8844b50..7609756 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -123,7 +123,7 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
 		return 0;
 	}
 
-	return -ENOMEM;
+	return -ENOSPC;
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index fe29d60..5ea1676 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -96,7 +96,8 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
 
 int
 nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
-		       int size, uint32_t *b_offset)
+		       int size, uint32_t start, uint32_t end,
+		       uint32_t *b_offset)
 {
 	struct drm_device *dev = chan->dev;
 	struct nouveau_gpuobj *nobj = NULL;
@@ -104,9 +105,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
 	uint32_t offset;
 	int target, ret;
 
-	mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0);
+	mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
+					  start, end, 0);
 	if (mem)
-		mem = drm_mm_get_block(mem, size, 0);
+		mem = drm_mm_get_block_range(mem, size, 0, start, end);
 	if (!mem) {
 		NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
 		return -ENOMEM;
@@ -177,7 +179,8 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
 	if (IS_ERR(chan))
 		return PTR_ERR(chan);
 
-	ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
+	ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
+				     &na->offset);
 	nouveau_channel_put(&chan);
 	return ret;
 }
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index ea00418..e57caa2 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -403,16 +403,24 @@ nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
 void
 nv50_instmem_flush(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
 	nv_wr32(dev, 0x00330c, 0x00000001);
 	if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
 		NV_ERROR(dev, "PRAMIN flush timeout\n");
+	spin_unlock(&dev_priv->ramin_lock);
 }
 
 void
 nv84_instmem_flush(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
 	nv_wr32(dev, 0x070000, 0x00000001);
 	if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
 		NV_ERROR(dev, "PRAMIN flush timeout\n");
+	spin_unlock(&dev_priv->ramin_lock);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 459ff08..6144156 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -169,7 +169,11 @@ nv50_vm_flush(struct nouveau_vm *vm)
 void
 nv50_vm_flush_engine(struct drm_device *dev, int engine)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
 	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
 	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
 		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
+	spin_unlock(&dev_priv->ramin_lock);
 }
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 56deae5..93fa735 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3490,7 +3490,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
 	track->num_texture = 16;
 	track->maxy = 4096;
 	track->separate_cube = 0;
-	track->aaresolve = true;
+	track->aaresolve = false;
 	track->aa.robj = NULL;
 }
 
@@ -3801,8 +3801,6 @@ static int r100_startup(struct radeon_device *rdev)
 	r100_mc_program(rdev);
 	/* Resume clock */
 	r100_clock_startup(rdev);
-	/* Initialize GPU configuration (# pipes, ...) */
-//	r100_gpu_init(rdev);
 	/* Initialize GART (initialize after TTM so we can allocate
 	 * memory through TTM but finalize after TTM) */
 	r100_enable_bm(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0e65709..3e7e7f9 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -971,7 +971,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
 		max_fractional_feed_div = pll->max_frac_feedback_div;
 	}
 
-	for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
+	for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
 		uint32_t ref_div;
 
 		if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 66324b5..cc44bdf 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
 	u32 tiling_flags = 0;
 	int ret;
 	int aligned_size, size;
+	int height = mode_cmd->height;
 
 	/* need to align pitch with crtc limits */
 	mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
 
-	size = mode_cmd->pitch * mode_cmd->height;
+	if (rdev->family >= CHIP_R600)
+		height = ALIGN(mode_cmd->height, 8);
+	size = mode_cmd->pitch * height;
 	aligned_size = ALIGN(size, PAGE_SIZE);
 	ret = radeon_gem_object_create(rdev, aligned_size, 0,
 				       RADEON_GEM_DOMAIN_VRAM,
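
Editor's note on the drm_irq.c hunks: they replace a bare smp_wmb() with smp_mb__before_atomic_inc()/smp_mb__after_atomic_inc() so that any reader who observes the new vblank count also observes the timestamp stored for that count. The user-space C11 sketch below illustrates only that publish/consume ordering under the assumption of a single writer; vbl_count, vbl_time[], publish_vblank() and read_vblank() are made-up names and this is not the kernel code itself.

/* build with: cc -std=c11 vblank_order.c */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define SLOTS 4

static _Atomic uint32_t vbl_count;		/* "cooked" counter */
static int64_t vbl_time[SLOTS];			/* timestamp ring buffer */

/* single producer (the "interrupt"): write the slot, then bump the count */
static void publish_vblank(int64_t now_ns)
{
	uint32_t cur = atomic_load_explicit(&vbl_count, memory_order_relaxed);

	vbl_time[(cur + 1) % SLOTS] = now_ns;	/* timestamp goes in first */
	/* stands in for smp_mb__before/after_atomic_inc() around atomic_inc() */
	atomic_fetch_add_explicit(&vbl_count, 1, memory_order_seq_cst);
}

/* any consumer: an acquire load of the count guarantees the matching
 * timestamp slot written before the increment is visible */
static int64_t read_vblank(uint32_t *count)
{
	uint32_t c = atomic_load_explicit(&vbl_count, memory_order_acquire);

	*count = c;
	return vbl_time[c % SLOTS];
}

int main(void)
{
	uint32_t c;
	int64_t ts;

	publish_vblank(1000);
	publish_vblank(2000);
	ts = read_vblank(&c);
	printf("count=%u timestamp=%lld ns\n", (unsigned)c, (long long)ts);
	return 0;
}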
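Editor's note on the intel_panel.c hunks: in combination mode the requested brightness is split between the legacy LBPC byte (PCI config offset 0xf4) and the PWM duty cycle, and the reported maximum grows by a factor of 0xff. The sketch below is a minimal user-space mirror of that arithmetic only; backlight_split and split_combination_level() are made-up names, and it assumes level and max are small enough that level * 0xfe does not overflow 32 bits, as in the driver's register-sized values.

/* build with: cc -std=c11 lbpc_split.c */
#include <stdint.h>
#include <stdio.h>

struct backlight_split {
	uint8_t  lbpc;	/* byte that would be written to the 0xf4 config register */
	uint32_t pwm;	/* duty cycle that stays in BLC_PWM_CTL */
};

/* Same arithmetic as the set_backlight hunk: fold the level into an LBPC
 * multiplier in 1..0xff, leave the remainder to the PWM field. */
static struct backlight_split split_combination_level(uint32_t level, uint32_t max)
{
	struct backlight_split s;

	s.lbpc = level * 0xfe / max + 1;	/* never zero, so the division below is safe */
	s.pwm = level / s.lbpc;
	return s;
}

int main(void)
{
	/* assume a PWM-only maximum of 1000; combination mode scales it by 0xff
	 * (the max *= 0xff hunk in intel_panel_get_max_backlight) */
	uint32_t max = 1000 * 0xff;
	uint32_t level = max / 2;
	struct backlight_split s = split_combination_level(level, max);

	/* the get_backlight hunk reads it back as roughly pwm * lbpc */
	printf("level=%u -> lbpc=%u pwm=%u readback~=%u\n",
	       (unsigned)level, (unsigned)s.lbpc, (unsigned)s.pwm,
	       (unsigned)(s.pwm * s.lbpc));
	return 0;
}

The round-trip is lossy by design: the readback pwm * lbpc only approximates the requested level, which is why the driver treats LBPC as a coarse multiplier and keeps the fine control in the PWM register.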